diff --git a/.gitattributes b/.gitattributes index e7740246b40ed9ca32ac4ad189b43da3113bb870..8f7d6c8c5c1bbc4dc68d8adb4350d38911c336e5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1204,3 +1204,11 @@ data/2025/2504_12xxx/2504.12328/9c302798-2685-4174-a5ed-3bbb745d3206_origin.pdf data/2025/2504_12xxx/2504.12329/4941f0c5-6d68-47ad-82a7-7d2f05fe7a51_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_12xxx/2504.12330/95083e69-e69b-4b7a-8e68-e3176f342c04_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_13xxx/2504.13192/73de2f4e-a687-4354-97b1-12546e4883e7_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_content_list.json b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e160e18495cd8c6ee91d19d5336e85d6d08bb5e7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_content_list.json @@ -0,0 +1,2429 @@ +[ + 
{ + "type": "text", + "text": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation", + "text_level": 1, + "bbox": [ + 199, + 130, + 799, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianwei Xiong", + "bbox": [ + 133, + 202, + 261, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jun Hao Liew2", + "bbox": [ + 303, + 203, + 424, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zilong Huang", + "bbox": [ + 465, + 203, + 584, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiashi Feng2", + "bbox": [ + 627, + 203, + 725, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xihui Liu $^{1\\dagger}$", + "bbox": [ + 767, + 203, + 861, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1The University of Hong Kong", + "bbox": [ + 274, + 220, + 517, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2ByteDance Seed", + "bbox": [ + 583, + 220, + 723, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project page: https://silentview.github.io/GigaTok/", + "bbox": [ + 297, + 239, + 694, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 291, + 325, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In autoregressive (AR) image generation, visual tokenizers compress images into compact discrete latent tokens, enabling efficient training of downstream autoregressive models for visual generation via next-token prediction. While scaling visual tokenizers improves image reconstruction quality, it often degrades downstream generation quality—a challenge not adequately addressed in existing literature. To address this, we introduce GigaTok, the first approach to simultaneously improve image reconstruction, generation, and representation learning when scaling visual tokenizers. 
We identify the growing complexity of latent space as the key factor behind the reconstruction vs. generation dilemma. To mitigate this, we propose semantic regularization, which aligns tokenizer features with semantically consistent features from a pre-trained visual encoder. This constraint prevents excessive latent space complexity during scaling, yielding consistent improvements in both reconstruction and downstream autoregressive generation. Building on semantic regularization, we explore three key practices for scaling tokenizers: (1) using 1D tokenizers for better scalability, (2) prioritizing decoder scaling when expanding both encoder and decoder, and (3) employing entropy loss to stabilize training for billion-scale tokenizers. By scaling to 3 billion parameters, GigaTok achieves state-of-the-art performance in reconstruction, downstream AR generation, and downstream AR representation quality.", + "bbox": [ + 89, + 323, + 483, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 744, + 220, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Autoregressive (AR) language models (LM) have emerged as a promising approach for visual generation [15, 50, 66, 69], driven by their proven scalability [2, 5, 14, 19, 37, 51, 52, 54, 55] and the potential for unified multimodal modeling [12, 45, 62]. The AR image generation framework consists of a visual tokenizer and a downstream AR generator. 
The tokenizer encodes images into discrete tokens, trained", + "bbox": [ + 89, + 770, + 482, + 876 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 292, + 622, + 366 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 295, + 694, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 295, + 756, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 761, + 295, + 828, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 830, + 295, + 897, + 364 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg", + "image_caption": [ + "Figure 1. Reconstruction vs. generation dilemma: Naively scaling visual tokenizers achieves better reconstruction but degrades downstream autoregressive (AR) generation. In contrast, GigaTok achieves better performance for both reconstruction and generation as tokenizers scale up." 
+ ], + "image_footnote": [], + "bbox": [ + 519, + 371, + 622, + 494 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 627, + 371, + 689, + 422 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg", + "image_caption": [ + "Better generation with larger tokenizer" + ], + "image_footnote": [], + "bbox": [ + 692, + 371, + 756, + 492 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 759, + 371, + 826, + 492 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 830, + 371, + 897, + 492 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "with image reconstruction supervision, while the AR generator models the distribution of these discrete tokens through next-token prediction. The image tokenizer plays a pivotal role in AR visual generation, providing a compact and expressive latent space that enables effective generative modeling by downstream AR models.", + "bbox": [ + 511, + 607, + 905, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite its pivotal role, scaling of visual tokenizer is rarely explored in the literature. In fact, unlike the downstream AR models whose scalability has been widely validated [12, 30, 60, 62], scaling the visual tokenizer presents a significant challenge. Specifically, there exists a reconstruction vs. generation dilemma, where scaling tokenizer improves reconstruction fidelity but degrades downstream generation quality, as shown in Fig. 1. 
This dilemma is also observed in prior works [13, 21]. In this work, we seek to overcome this limitation and explore strategies for effectively scaling tokenizers to enhance both reconstruction and generation performance.", + "bbox": [ + 511, + 700, + 906, + 882 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To investigate the root cause of this dilemma, we propose", + "bbox": [ + 532, + 885, + 903, + 901 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.08736v2 [cs.CV] 24 Aug 2025", + "bbox": [ + 22, + 273, + 58, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding Author.", + "bbox": [ + 91, + 886, + 223, + 898 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg", + "image_caption": [ + "Figure 2. The 2.9B GigaTok achieves SOTA autoregressive image generation with a 1.4B AR model on ImageNet $256\\times 256$ resolution." + ], + "image_footnote": [], + "bbox": [ + 94, + 90, + 903, + 300 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "an AR probing scheme that trains a lightweight downstream generative AR model to monitor the tokenizer's training process. Surprisingly, we find that as tokenizers scale, the downstream AR model struggles more to learn the resulting token distribution, as evidenced by the increasing AR generation loss. This suggests that the larger tokenizers produce a more complex token space, making it increasingly difficult for AR models to learn effectively.", + "bbox": [ + 89, + 354, + 482, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this challenge, we introduce pre-trained visual representation models (e.g. DINOv2 [43]) to regularize tokenizers. Specifically, we leverage a semantic regularization loss during tokenizer training, encouraging high similarity between tokenizer features and the pre-trained model features. 
Such regularization helps constrain the latent space complexity, preventing the tokenizer from learning overly complicated latent token dependencies that hinder downstream AR generative modeling. Moreover, we design a vector-quantized (VQ) tokenizer with a hybrid CNN-Transformer architecture as the backbone, suitable for both 1D and 2D tokenizers, and explore best practices for scaling tokenizers: (1) 1D tokenizers exhibit better scalability compared to 2D tokenizers; (2) Asymmetric model scaling, prioritizing decoder scaling over encoder scaling, proves effective; (3) Entropy loss [69] becomes crucial for convergence when training tokenizers with billion-level parameters. With our semantic regularization and three key scaling strategies, we effectively scale GigaTok to 3 billion parameters, overcoming the reconstruction vs. generation dilemma.", + "bbox": [ + 89, + 477, + 482, + 779 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize our contributions as follows:", + "bbox": [ + 109, + 779, + 401, + 792 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We identify that the reconstruction vs. generation dilemma in tokenizer scaling stems from increased latent space complexity in larger tokenizers. 
To address this, we propose semantic regularization, effectively mitigating the dilemma and enabling tokenizer scaling.", + "- We explore best practices for scaling tokenizers, including 1D tokenizers with hybrid CNN-Transformer archi" + ], + "bbox": [ + 89, + 795, + 483, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tecture, asymmetric encoder-decoder scaling, and entropy loss for billion-scale tokenizers.", + "bbox": [ + 526, + 354, + 906, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- Our GigaTok is the first tokenizer scaled to 3B, achieving state-of-the-art reconstruction, downstream AR generation, and downstream AR representation on ImageNet.", + "bbox": [ + 513, + 385, + 906, + 431 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 513, + 448, + 653, + 464 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Image tokenizers. Image tokenizers map image inputs into discrete [15, 56, 66] or continuous [28] tokens which can be modeled by downstream generative models. For discrete tokenizers, Vector Quantization (VQ) [15, 56, 66] is dominantly adopted. Recently, new quantization methods [49, 69, 75, 76] have also been proposed for better scaling of codebook size. However, how to properly scale up tokenizer models is insufficiently studied in existing literature. ViT-VQGAN [66] and TiTok [70] utilize transformer architecture to enable convenient scaling of tokenizers, but end up training their best generative models on smaller tokenizer versions. A concurrent work, ViTok [76], suggests de-prioritizing VAE scaling due to its less predictable effect for downstream diffusion models. We observe a similar reconstruction vs. generation dilemma in scaling discrete tokenizers, and provide our analysis and solution to it.", + "bbox": [ + 509, + 474, + 906, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Autoregressive Visual Generation. 
Autoregressive visual generative models [33, 38, 40, 49, 50, 56, 58, 60, 66] follow the next-token-prediction (NTP) approach of LLMs, enabling the leverage of advancements in LLMs and simplifying the path to unified multi-modal generation. Other variants utilize visual-specific paradigms such as mask image modeling [8, 61, 69, 70] and next-scale-prediction [36, 53] for better performance. We reveal that scaling tokenizers helps NTP AR models to be comparable to these variants.", + "bbox": [ + 511, + 718, + 908, + 853 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Semantic Guidance for Visual Generative Models and Tokenizers. The guidance from visual foundation models [7, 23, 43, 46, 72] has been used to improve training", + "bbox": [ + 511, + 854, + 908, + 902 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "convergence speed and quality [65, 71] of visual generative models, as well as enhancing representation quality or downstream performance of visual tokenizers [9, 10, 18, 36, 41, 59, 63–65, 68, 73, 76, 77]. REPA [71] presents impressive performance improvements brought by a simple representation alignment strategy, and recently, VA-VAE [65] shows the significant benefits of semantic guidance to the reconstruction-generation Pareto Frontier of VAEs. Different from existing work, GigaTok novelly reveals the critical role of semantic regularization for resolving the reconstruction vs. generation dilemma in scaling visual tokenizers.", + "bbox": [ + 89, + 90, + 483, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Pilot Study", + "text_level": 1, + "bbox": [ + 89, + 270, + 209, + 286 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first introduce AR Probing as a proxy to effectively monitor the tokenizer's effectiveness for downstream generation (Sec 3.1), followed by a pilot experiment that investigates the reconstruction vs. 
generation challenges when naively scaling visual tokenizers (Sec 3.2).", + "bbox": [ + 89, + 295, + 483, + 371 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. AR Probing for Tokenizer Evaluation", + "text_level": 1, + "bbox": [ + 89, + 380, + 413, + 395 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In autoregressive visual generation, the training of the tokenizer and downstream AR model are performed in separate stages. In the first stage, a visual tokenizer is trained to compress images into discrete tokens, optimized with reconstruction objective. In the second stage, the downstream generative model is trained based on the discrete tokens from the pre-trained tokenizer. However, a tokenizer that performs well in terms of reconstruction fidelity in the first stage may not necessarily lead to better performance for downstream generative models. Thus, it is crucial to evaluate the effectiveness of the trained tokenizers for downstream generation alongside its reconstruction quality.", + "bbox": [ + 89, + 402, + 483, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Despite its importance, assessing how a tokenizer influences downstream generation models can be computationally expensive. For example, sufficiently training a 343M parameter downstream AR generator takes 170 hours on 64 V100 GPUs. To address this challenge, we introduce AR Probing, inspired by Linear Probing in representation learning literature [11, 23]. The key idea is to use the performance of a small AR model as a proxy to reflect the performance trends of large-scale AR models.", + "bbox": [ + 89, + 584, + 483, + 718 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, we use the tokenizer to train a small Llama-style model [50, 54] (111M parameters) for 50 epochs, and evaluate its gFID [24], validation loss, and linear probing accuracy [11, 23] for a fair comparison between different tokenizers. 
Training the proposed AR Probing model for evaluating tokenizers is $10 \\times$ more efficient than training the original 343M downstream AR model. Our experiments in Sec. 5.1 (Fig. 6) demonstrate that the trends observed with AR Probing align with the performance of the large-scale AR models after sufficient training.", + "bbox": [ + 89, + 719, + 483, + 869 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "gFID. The generation FID [24] of AR probing indicates the overall image generation performance of the two-stage", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg", + "image_caption": [ + "Figure 3. Scaling trend for vanilla 1D tokenizers. As the model size increases, the reconstruction quality of vanilla tokenizers improves but the downstream AR Probing gFID consistently degrades. The increasing AR Probing validation loss indicates that scaling vanilla tokenizers results in a more complex latent space, making it difficult for AR models to learn effectively." + ], + "image_footnote": [], + "bbox": [ + 519, + 90, + 643, + 172 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 90, + 774, + 172 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 779, + 90, + 901, + 172 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "framework. 
It reflects both the reconstruction fidelity of the tokenizer and how well the downstream AR probing model can learn the dependency of the visual tokens (i.e., learnability of the token distribution).", + "bbox": [ + 511, + 284, + 903, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Validation loss. We use the validation loss of the AR probing model to measure the learnability of the latent tokens as a disentangled factor. The validation loss is calculated as an average of the token-wise cross-entropy loss in the next-token-prediction paradigm on ImageNet [48] 50k validation set. With the same vocabulary size, the same number and structure of visual tokens, and the same AR probing model, larger validation loss indicates a latent space that is more difficult for the AR model to learn. Therefore, we use validation loss to reflect the latent space complexity and learnability for AR models.", + "bbox": [ + 511, + 345, + 905, + 511 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Linear probing accuracy. Beyond visual generation quality, we also investigate whether scaling tokenizers will lead to better visual representations of AR models, which may provide inspiration for future research in unified multimodal understanding and generation with AR models. To assess the representation quality, we adopt the standard practice [11, 23] of linear probing accuracy using features from the middle Transformer layer of the AR probing model.", + "bbox": [ + 511, + 511, + 905, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. Naively Scaling Tokenizers Does Not Work", + "text_level": 1, + "bbox": [ + 511, + 642, + 883, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To study the challenges when naively scaling visual tokenizers, we train three vector-quantized tokenizers1 on ImageNet [48] at $256 \\times 256$ resolution with increasing model sizes. As shown in Fig. 
3, as the tokenizer size increases, although the reconstruction quality (rFID) consistently improves, the AR generation performance (gFID) significantly degrades. This highlights the reconstruction vs. generation dilemma in tokenizer scaling. Moreover, we observe that the validation loss of AR Probing consistently increases as the tokenizers scale, indicating that larger tokenizers lead to complicated token dependencies that are more difficult for the AR model to learn. This observation motivates us to design the semantic regularization to constrain the latent space complexity of the tokenizer and therefore break the", + "bbox": [ + 511, + 665, + 905, + 876 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "The tokenizer architectures are described in Sec. 4.1", + "bbox": [ + 513, + 887, + 790, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "reconstruction vs. generation dilemma in Sec. 4.2.", + "bbox": [ + 89, + 90, + 421, + 104 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. GigaTok", + "text_level": 1, + "bbox": [ + 89, + 119, + 189, + 137 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce the model structure and training strategies for our scalable visual tokenizer, GigaTok. In Sec. 4.1, we present a tokenizer backbone supporting 1D and 2D token structures, and discuss the asymmetric scaling strategies for the encoder and decoder. In Sec. 4.2, we introduce semantic regularization, which breaks the reconstruction vs. generation dilemma by regularizing the complexity of the latent space with pre-trained visual representations. In Sec. 4.3, we show how entropy loss [69] facilitates the convergence of billion-scale tokenizers.", + "bbox": [ + 89, + 143, + 483, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. 
Architecture", + "text_level": 1, + "bbox": [ + 89, + 305, + 225, + 320 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The CNN [32] architectures have been the dominant choices for image tokenizers [15, 40, 69, 76] due to their effectiveness in capturing fine-grained local details. Yet, Transformers are more scalable architectures with less inductive bias. Thus, we design a vector quantized tokenizer backbone with a hybrid architecture that combines CNN [15, 32] and Transformer [6, 13, 57] for encoder and decoder (Fig. 4). Specifically, our encoder consists of a series of CNN blocks that progressively downsamples the input image by a factor of $p$ , followed by Transformer layers and a vector quantizer to produce discrete latent codes. Similarly, our decoder consists of multiple Transformer layers, followed by CNN decoders which upsamples the features to obtain the reconstructed image2. Our tokenizer architecture can be adapted to both 1D and 2D tokenizers by using different Transformer designs introduced in the next two paragraphs.", + "bbox": [ + 89, + 327, + 483, + 569 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2D tokenizers with ViT. For 2D tokenizers, the Transformers in both tokenizer encoder and decoder are implemented by ViT [13] architecture. 2D structures of the latent features and tokens are preserved throughout the tokenizer.", + "bbox": [ + 89, + 571, + 483, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1D tokenizers with Q-Former. For 1D tokenizers, we implement the Transformer modules in both encoder and decoder as Q-Formers [6, 34]. The Q-Former in the encoder employs 1D queries, transforming 2D input features into 1D latent tokens. The Q-Former in the decoder utilizes 2D queries to transform 1D latent tokens back to 2D features, which are then passed to the CNN decoder to reconstruct images. 
The 1D tokenizers remove the 2D inductive bias and demonstrate better scalability than 2D tokenizers in our experiments (Sec. 5.5).", + "bbox": [ + 89, + 636, + 483, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Asymmetric encoder-decoder scaling. Since the decoder faces the more challenging task of reconstructing images from lossy latent codes, we adopt an asymmetric design for more efficient parameter allocation. Specifically, we scale both the encoder and decoder, while ensuring that", + "bbox": [ + 89, + 790, + 483, + 867 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg", + "image_caption": [ + "Figure 4. GigaTok architecture and semantic regularization." + ], + "image_footnote": [ + "Top: We use a hybrid CNN-Transformer design for our visual tokenizer. The transformer layers are implemented with ViT for 2D tokenizer and Q-Former for 1D tokenizer. Bottom: We use a frozen DINOv2 [43] image encoder for semantic regularization." + ], + "bbox": [ + 517, + 88, + 906, + 276 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the decoders are always larger than the encoders. In practice, we maintain the same and fixed size for the CNN encoder/decoder and only increase the depth and width of the Transformer modules for scaling.", + "bbox": [ + 511, + 375, + 905, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Semantic Regularization", + "text_level": 1, + "bbox": [ + 511, + 444, + 741, + 460 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In our pilot study (Sec. 3.2), the latent space complexity significantly increases as the tokenizer scales, which potentially leads to worse downstream AR generation for larger tokenizers. 
We hypothesize that larger tokenizers tend to capture excessive fine-grained low-level details for better reconstruction, resulting in overly complex latent token distributions, which makes it harder for AR models to learn the token dependencies effectively.", + "bbox": [ + 511, + 465, + 905, + 585 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address this, we introduce semantic regularization to guide the tokenizer to encode a more semantically consistent latent space, which is less complex and easier for downstream generative modeling. Specifically, we introduce a simple semantic regularization term alongside the tokenizer training objective. The regularization aligns the intermediate features of the tokenizer decoder with the feature representations extracted from pre-trained frozen DINOv2 [43].", + "bbox": [ + 511, + 587, + 905, + 707 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mathematically, let $f^{\\mathrm{dec},l}$ be the output feature of the $l$ -th layer of the Transformer decoder, $f^{\\mathrm{DINO}}$ be the semantic features of a pretrained image encoder (here DINOv2-B [43]). The semantic regularization can be represented as:", + "bbox": [ + 511, + 707, + 905, + 768 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\text{reg}} = \\frac{1}{N} \\sum_{n=1}^{N} \\mathrm{sim}\\left(f_{n}^{\\mathrm{dec},l}, \\phi\\left(f_{n}^{\\mathrm{DINO}}\\right)\\right) \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 779, + 906, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $N$ is the batch size, $n$ is the image index, $\\mathrm{sim}(\\cdot ,\\cdot)$ is a cosine similarity function, and $\\phi (\\cdot)$ is an MLP that projects decoder feature $f^{\\mathrm{dec},l}$ to match the channel dimension of $f^{\\mathrm{DINO}}$ . 
When training VQ tokenizers, we add the semantic regularization to the original VQGAN [15, 50] objectives:", + "bbox": [ + 511, + 824, + 906, + 902 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "Throughout this work, we use downsample ratio $p = 16$ , codebook dimension $D = 8$ , and codebook size 16384 by default.", + "bbox": [ + 89, + 875, + 482, + 900 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2a947eeb75e4a83e727c41d1f9c1ee05719590673e85d6d08bb5e7", + "image_caption": [ + "Figure 5. Training curves for 2.9B XL-XXL tokenizers with and without entropy loss. A 2.9B tokenizer does not converge without entropy loss. The entropy loss encourages high codebook usage and stabilizes training loss." + ], + "image_footnote": [], + "bbox": [ + 94, + 89, + 480, + 179 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\text{total}} = \\mathcal{L}_{\\mathrm{vqgan}} + \\lambda \\mathcal{L}_{\\text{reg}}, \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 282, + 482, + 299 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and we empirically set $\\lambda = 0.5$ in this work. Here $\\mathcal{L}_{\\mathrm{vqgan}}$ is a combination of multiple losses, including $\\mathcal{L}_{\\mathrm{recon}}$ , the $l_{2}$ reconstruction loss on image pixels, $\\mathcal{L}_{\\mathrm{percp}}$ , the perceptual loss [27, 74], $\\mathcal{L}_{\\mathrm{GAN}}$ , PatchGAN [26] adversarial loss, and $\\mathcal{L}_{\\mathrm{VQ}}$ [15, 66] the VQ codebook loss.", + "bbox": [ + 89, + 306, + 483, + 383 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. 
Entropy Loss for Billion-Level Tokenizers", + "text_level": 1, + "bbox": [ + 89, + 391, + 447, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When training a 2.9B tokenizer, we find that using the same training recipe as the 622M tokenizer leads to convergence failure for both perceptual loss and reconstruction loss, and consistently low codebook usage. We hypothesize that low codebook usage accounts for the convergence difficulty. To address this, we incorporate entropy penalty [67, 69] to encourage higher codebook utilization:", + "bbox": [ + 89, + 414, + 483, + 518 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\text{entropy}} = \\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}}|\\mathbf{z})] - H(\\hat{\\mathbf{z}}) \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 532, + 482, + 549 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $H(\\cdot)$ denotes the Shannon entropy, $\\mathbf{z} \\in \\mathbb{R}^D$ is the input for quantizer to be quantized to $\\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D$ and $\\mathbf{c}_i$ is the $i$ -th codebook vector. $\\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}}|\\mathbf{z})]$ penalizes the uncertainty in quantization to reduce quantization error, and $-H(\\hat{\\mathbf{z}})$ encourages the codebook vectors to be selected more uniformly across the entire codebook. The detailed derivation can be found in our supp. We find that the entropy penalty addresses the convergence difficulty of large tokenizers. As shown in Fig. 5, introducing entropy loss to the 2.9B tokenizer enables the codebook usage to quickly reach a high level, and the loss converges properly3.", + "bbox": [ + 89, + 560, + 483, + 728 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 89, + 741, + 223, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. 
Settings", + "text_level": 1, + "bbox": [ + 89, + 766, + 189, + 782 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For scaling up visual tokenizers, we follow the architecture configurations for the Transformers in GigaTok tokenizers as summarized in Tab. 1. We evaluate the tokenizers from three perspectives: reconstruction, downstream AR generation, and downstream AR representation quality. We use", + "bbox": [ + 89, + 789, + 483, + 866 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/79b53a77bb3a6bf0032d29e80aaf661efdeae855aaf6a8344064a59663aa038f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeEnc./Dec.Params.BlocksHeadsDim.
1D Tok.S26M68512
B115M1212768
L405M24161024
XL948M36201280
XXL1870M48241536
2D Tok.S19M68512
B86M1212768
L329M24161024
", + "bbox": [ + 529, + 88, + 893, + 236 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1. Architectures of the transformer variants for tokenizer encoder/decoder parts in our experiments. We use Q-Former [6, 34] for 1D tokenizers and ViT [13] for 2D tokenizers.", + "bbox": [ + 511, + 246, + 903, + 287 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg", + "image_caption": [ + "Figure 6. Correlation between AR Probing Performance and Larger AR models. For 3 tokenizers: S-S, S-L, and B-L, we present that as the tokenizer improves, the performance improvements of AR Probing correlate to the performance improvements of larger AR models. Therefore, the AR Probing can effectively indicate how the tokenizer affects downstream larger AR models with limited computational costs." + ], + "image_footnote": [], + "bbox": [ + 514, + 304, + 705, + 368 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 305, + 901, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "rFID and LPIPS [74] to evaluate reconstruction fidelity, gFID to evaluate generation performance, and linear probing to evaluate the representation quality of the downstream AR model. Our downstream AR models are LlamaGen [50] with 1D absolute positional embedding. Our scaling experiments (Sec. 5.2) and ablation study (Sec. 5.3) use AR Probing (111M AR model described in Sec.3.1) validation loss, gFID, and linear probing to reflect the learnability of tokens, generation performance, and representation quality, respectively. While in the system-level comparison (Sec. 5.4), we train larger 1.4B AR models for comparison with previous work. 
More details are in the supplementary material.", + "bbox": [ + 511, + 500, + 905, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Effectiveness of AR Probing. As shown in Fig. 6, AR Probing performances including gFID and linear probing accuracy align with the larger LlamaGen-XL [50] model results. Therefore, we use AR Probing throughout the following experiments except for the system-level comparison.", + "bbox": [ + 511, + 681, + 905, + 758 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2. Scaling with Semantic Regularization", + "text_level": 1, + "bbox": [ + 511, + 770, + 836, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We demonstrate that our proposed semantic regularization resolves the reconstruction vs. generation dilemma in scaling tokenizers.", + "bbox": [ + 511, + 794, + 903, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Model scaling with semantic regularization. Results are shown in Fig. 7. (1) Semantic regularization improves the reconstruction fidelity, indicated by lower rFID. (2) More importantly, the AR Probing validation loss and gFID de", + "bbox": [ + 511, + 839, + 905, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "We take perceptual loss as an example, and reconstruction loss shows a similar pattern", + "bbox": [ + 89, + 875, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg", + "image_caption": [ + "Figure 7. Scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. By semantic regularization, GigaTok resolves the reconstruction vs. generation dilemma for tokenizer scaling in contrast to the vanilla version without semantic regularization. Moreover, GigaTok consistently improves the representation quality of downstream AR models by scaling up visual tokenizers. 
Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." + ], + "image_footnote": [], + "bbox": [ + 96, + 111, + 232, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg", + "image_caption": [ + "w/o semantic regularization w/ semantic regularization" + ], + "image_footnote": [], + "bbox": [ + 272, + 111, + 392, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 400, + 111, + 584, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 111, + 728, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 111, + 901, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg", + "image_caption": [ + "Figure 8. Visualization of tokenizer features with and without semantic regularization. We compute PCA among the tokenizer features of a group of images of the same \"golden retriever\" class and visualize the first 3 PCA components. We observe that the latent space of vanilla tokenizers shows inconsistent features both within a single image or across multiple semantically similar images. In contrast, GigaTok encodes images with semantic consistency and thus reduces the latent space complexity for AR models." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 330, + 480, + 391 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "grades for larger tokenizers without semantic regularization, showing the reconstruction vs. generation dilemma. The dilemma is addressed with semantic regularization, evidenced by the relatively constrained validation loss and consistently decreasing gFID. (3) The Linear Probing results show that semantic regularization helps AR models to learn better representations as the tokenizer model scales up.", + "bbox": [ + 89, + 534, + 482, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Visualization for the tokenizer feature space. We visualize the first 3 PCA components of the tokenizer features from the first Transformer decoder layer for a group of images. As shown in Fig. 8, we find the vanilla tokenizer encodes a latent space with limited semantic consistency, which potentially impairs its learnability for downstream AR models. In contrast, GigaTok presents semantically consistent patterns (Fig. 8), indicating a meaningful and consistent latent space.", + "bbox": [ + 89, + 640, + 482, + 775 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Asymmetric 1D Tokenizer is More Scalable", + "text_level": 1, + "bbox": [ + 89, + 784, + 457, + 799 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Tokenizer decoder deserves more parameters. To determine whether the decoder or encoder should be prioritized when scaling up, we compare S-B $^4$ and B-S tokenizers in Tab. 2, both trained under the same setting for 100", + "bbox": [ + 89, + 806, + 482, + 867 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7679cbf817b12edaa14706b6831569f0b7f4575518cabf088803fec2247636ff.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Enc./Dec. SizerFID↓LPIPS↓gFID↓Lin Acc.↑
B-S0.980.2216.5664.5
S-B0.940.2145.6559.8
S-L0.830.2065.1960.6
B-L0.810.2064.8266.9
", + "bbox": [ + 514, + 330, + 913, + 429 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2. The results for scaling encoder/decoder. Prioritizing the scaling of decoders benefits downstream generation more than scaling encoders (S-B v.s. B-S). But scaling encoders can still bring significant improvements (S-L v.s. B-L).", + "bbox": [ + 511, + 440, + 906, + 496 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg", + "image_caption": [ + "Figure 9. Scalability comparison for 1D and 2D tokenizers. Using the same training setting, 1D tokenizers shows better reconstruction (rFID) and downstream representation quality (AR Probing: Lin Acc.). For downstream generation (gFID), 1D tokenizers present a steeper improving trend than 2D tokenizers." + ], + "image_footnote": [], + "bbox": [ + 517, + 507, + 898, + 616 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "epochs. Our results show that scaling decoders, rather than encoders, leads to greater improvements in both reconstruction and downstream generation, suggesting that decoder scaling should be prioritized.", + "bbox": [ + 511, + 718, + 903, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Scaling tokenizer encoder is also important. While prioritizing the scaling of tokenizer decoders yields significant benefits, we also find that scaling tokenizer encoders can further enhance downstream models. In Tab. 2, we show that a B-L tokenizer gains significant improvements compared to an S-L tokenizer. Therefore, we recommend scaling both encoders and decoders while maintaining a larger decoder than the encoder for optimal performance.", + "bbox": [ + 511, + 780, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "X-Y tokenizer denotes X-sized encoder and Y-sized decoder. 
For example, S-B indicates Small encoder-Base decoder structure", + "bbox": [ + 89, + 875, + 482, + 898 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/407127a1c9516fe4df42fdca4780af32a20fdb67d771d49da601eecdcc3f5880.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TokenizerTok. Type/Param.#TokensrFID↓Generator Model/Param.TypegFID↓Acc.↑
Continuous token modeling
VAE [47]KL†55M40960.27LDM-4 [47]400MDiff.3.60-
DiT-XL/2 [44]675MDiff.2.27-
SD-VAE [1]KL†84M10240.62SiT-XL/2 [42]675MDiff.2.06-
SiT-XL/2 + REPA [71]675MDiff.1.4274.6
VA-VAE [65]KL70M2560.28LightningDiT [65]675MDiff.1.35-
VAE [35]KL66M2560.53MAR-H [35]943MAR+Diff.1.5560.0°
Discrete token modeling
VQGAN [8]VQ66M2562.28MaskGIT [8]227MMask.6.18*-
TiTok-S [70]VQ72M1281.71MaskGIT-UViT-L [4, 8]287MMask.1.97-
TiTok-L [70]VQ641M322.21MaskGIT-ViT [8]177MMask.2.77-
B-AE-d32 [22]LFQ66M2561.69BiGR-XXL-d32 [22]1.5BAR+Diff2.36-
BiGR-XL-d32 [22]799MAR+Diff-69.8
VAR-Tok. [53]MSRQ†109M6801.00‡VAR-d24 [53]1.0BVAR2.09-
VAR-d30 [53]2.0BVAR1.92-
ImageFolder [36]MSRQ176M2860.80‡ImageFolder-VAR [36]362MVAR2.60-
VQGAN [15]VQ23M2564.98Taming-Tran. [15]1.4BAR15.78*-
ViT-VQGAN [66]VQ64M10241.28VIM-Large [66]1.7BAR4.17*-
RQ-VAE [33]RQ66M2563.20RQTran. [33]3.8BAR7.55*-
Open-MAGVIT2 [40]LFQ133M2561.17Open-MAGVIT2-XL [40]1.5BAR2.53-
IBQ [49]IBQ128M2561.37IBQ-XXL [49]2.1BAR2.05-
LlamaGen-Tok. [50]VQ72M2562.19LlamaGen-L [50]343MAR3.8140.5°
LlamaGen-XXL [50]1.4BAR3.09-
LlamaGen-Tok. [50]VQ72M5760.94LlamaGen-XXL [50]1.4BAR2.34-
GigaTok-B-LVQ622M2560.51‡LlamaGen-B (1d) [50]111MAR3.3367.7
GigaTok-S-SVQ136M2561.01LlamaGen-B (1d) [50]111MAR4.0562.6
GigaTok-S-BVQ232M2560.89LlamaGen-B (1d) [50]111MAR3.8362.9
GigaTok-B-LVQ622M2560.81LlamaGen-B (1d) [50]111MAR3.2667.6
LlamaGen-XXL (1d) [50]1.4BAR2.03*69.4
GigaTok-XL-XXLVQ2.9B2560.79LlamaGen-B (1d) [50]111MAR3.1572.0
LlamaGen-XXL (1d) [50]1.4BAR1.98*74.0
", + "bbox": [ + 102, + 88, + 895, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3. System-level comparison for tokenizers and downstream generation models on ImageNet $256 \\times 256$ . For gFID, we present the lowest value between w/ or w/o CFG scenarios. †: Training set includes data besides ImageNet. ‡: Using frozen DINO [7] for discriminator, which largely improves rFID. ☆: Without classifier-free-guidance. ◇: Data from BiGR [22].", + "bbox": [ + 89, + 597, + 906, + 641 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1D tokenizers are more scalable than 2D tokenizers. We train S-S, S-B and B-L 1D/2D tokenizers with the same setting with semantic regularization. As shown in Fig. 9, 1D tokenizers consistently achieve better rFID and AR Probing linear probing accuracy than 2D tokenizers. For AR Probing gFID, the 1D tokenizers exhibit a steeper scaling trend, eventually surpassing 2D tokenizers as the model scales. We attribute the superior scalability of 1D tokenizers to the reduced inductive bias.", + "bbox": [ + 89, + 659, + 482, + 795 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. System-level Comparison", + "text_level": 1, + "bbox": [ + 89, + 801, + 323, + 819 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experiment Settings. Using GigaTok for tokenization, we scale the training of LlamaGen [50] AR models on $256 \\times 256$ ImageNet training set for 300 epochs to compare with other methods. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We provide", + "bbox": [ + 89, + 824, + 483, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the results of a B-L tokenizer trained with DINO discriminator [36, 53] to fairly compare rFID. But in practice we find DINO discriminator provides limited improvement for LPIPS and may affect the training stability of billion-scale tokenizers. 
Therefore, we exclude it from our main design.", + "bbox": [ + 511, + 659, + 906, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results. As shown in Tab. 3, our 2.9B GigaTok achieves state-of-the-art reconstruction performance (rIFD) among all discrete tokenizers. Furthermore, with our 2.9B tokenizer, the downstream 1.4B AR model achieves state-of-the-art image generation performance (gFID) among LLM-style autoregressive next-token-prediction models. VAR [53] predicts images with next-scale prediction rather than next-token-prediction, which is less compatible with language models. Our model achieves comparable gFID to VAR [53] with a simple LLM-style downstream AR genera", + "bbox": [ + 511, + 750, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d13b14d845494dadf9a20a896b770459572d94b3dd032d5d792021b54867189b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Decoder\\AR Model SizeBLXXL
B3.7%2.3%1.3%
L11.2%7.0%3.4%
XXL32.4%20.3%9.9%
", + "bbox": [ + 106, + 88, + 470, + 165 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tor without incorporating vision-specific designs like VAR. Moreover, this 1.4B AR model trained on the 2.9B tokenizer achieves state-of-the-art linear probing accuracy via visual generative pretraining5. This indicates that our GigaTok helps the downstream generation model to learn better representations. The high-quality representation learned from generative pre-training may also help unify generation and understanding for future native multimodal models.", + "bbox": [ + 89, + 255, + 483, + 377 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.5. Discussion and Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 383, + 362, + 400 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/86e9797e3f6e778b4e27c04bb9c93e8c669942a1dfc25453926910afa82086d5.jpg", + "table_caption": [ + "Table 4. Ratio of time consumptions for tokenizer decoding during image generation. When we use a 2.9B XLXXL tokenizer for a 1.4B LlamaGen-XXL AR model, the tokenizer decoding only takes $9.9\\%$ of the total inference time." + ], + "table_footnote": [], + "table_body": "
Align. Layer lrFID↓LPIPS↓gFID↓Lin Acc.↑
21.060.2246.2663.4
31.010.2236.1061.9
41.070.2236.0758.6
", + "bbox": [ + 91, + 417, + 486, + 494 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/6d6262b791a8ccca0ad91111cb5a0b1313980e427f8b7c97c20147297de64888.jpg", + "table_caption": [ + "Table 5. Layer $l$ for semantic regularization (S-S tokenizer). Smaller $l$ brings better downstream AR model representations but can sacrifice reconstruction and downstream generation quality. We choose $l = 3$ by default for more balanced performance." + ], + "table_footnote": [], + "table_body": "
Sem. Enc.rFID↓LPIPS↓gFID↓Lin Acc.↑
CLIP [16, 46]0.910.2106.3561.4
SigLIP [72]0.920.2106.2056.7
DINOv2-B [43]0.850.2125.5564.4
", + "bbox": [ + 91, + 587, + 496, + 664 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Discussion on generation costs. When generating an image, AR models take multiple passes to predict tokens, while tokenizers only need one forward pass. Therefore, the time consumption for decoding tokens to images is relatively small compared to AR models. We record the ratio of time spent on tokenizer decoding for different tokenizer/AR models in Tab. 4. For a 1.4B AR model, our largest 2.9B tokenizer takes only $\\sim 10\\%$ of the total inference time.", + "bbox": [ + 89, + 734, + 482, + 854 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f4034ae46c2d3aa8bd499318635ef2d35fe3c4609eb808a740ddf39df9fd08b0.jpg", + "table_caption": [ + "Table 6. Ablation study for the choice of pretrained semantic encoders (S-B tokenizer). DINOv2-B delivers the best performance among all models." + ], + "table_footnote": [], + "table_body": "
Sem. Reg. λrFID↓LPIPS↓gFID↓Lin Acc.↑
0.251.280.2266.2757.0
0.501.220.2286.3958.6
0.751.270.2366.2958.6
1.001.380.2396.2762.5
", + "bbox": [ + 519, + 88, + 903, + 181 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 7. Ablation Study for the semantic regularization weight (S-S tokenizer). A strong semantic regularization weight leads to worse reconstruction but better downstream representation. We choose $\\lambda = 0.5$ by default for more balanced performance.", + "bbox": [ + 511, + 191, + 906, + 247 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Searching the best layer for semantic regularization. We search $l$ , the layer's index in the Transformer decoder before intermediate features are extracted to calculate semantic regularization in Eq. 1. As shown in Tab. 5, varying $l$ presents a trade-off between gFID and the Lin Acc. for AR Probing. Smaller $l$ means stricter regularization for the latent space so that the downstream generation models learn better representation. However, smaller $l$ also sacrifices generation quality. We choose $l = 3$ for a more balanced rFID, gFID, and linear probing accuracy for all tokenizers.", + "bbox": [ + 511, + 273, + 906, + 426 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Exploring pretrained semantic encoder choices. We compare CLIP (DFN) [16, 46], SigLIP-400M [72] and DINOv2-B [43] as the source of semantic regularization for S-B tokenizers. As shown in Tab. 6, utilizing DINOv2-B as the semantic encoder for regularization produces the best tokenizer for reconstruction, downstream class conditional generation and representation quality.", + "bbox": [ + 511, + 428, + 908, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Exploring weights for semantic regularization. We study the effects of different regularization weights $\\lambda$ (Eq. 2), from 0.25 to 1.00. As shown in Tab. 7, a large $\\lambda$ (0.75, 1.00) will damage the reconstruction quality but benefits the linear probing accuracy, whereas smaller $\\lambda$ (0.25) results in suboptimal rFID and linear probing accuracy. 
We choose the more balanced $\\lambda = 0.5$ as a default for all tokenizers.", + "bbox": [ + 511, + 537, + 908, + 643 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 657, + 633, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we study and address the reconstruction vs. generation dilemma for scaling visual tokenizers. We identify that the dilemma stems from increasing latent space complexity in larger tokenizers. We propose semantic regularization to effectively regularize the tokenizer latent space by injecting pre-trained representations to align with tokenizer features in training. The semantic regularization, together with several key practices we explored, lead to the first 3B tokenizer, GigaTok, that achieves state-of-the-art reconstruction, downstream AR generation, and downstream AR representation quality. Please refer to discussions on limitations and future work in supplementary materials.", + "bbox": [ + 511, + 681, + 908, + 864 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "REPA [71] achieves better representation by directly distilling pretrained representations to the generation model, which is not a fair comparison with ours as we do not leverage the supervision for AR training.", + "bbox": [ + 89, + 862, + 482, + 901 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 91, + 90, + 250, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work is partially supported by the National Nature Science Foundation of China (No. 
62402406).", + "bbox": [ + 89, + 114, + 480, + 143 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The authors also sincerely thank Qihang Yu and Liang-Chieh Chen for their valuable discussions during the development of GigaTok.", + "bbox": [ + 89, + 145, + 480, + 191 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 202, + 187, + 218 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] stabilityyai/sd-vae-ft-ema. https://huggingface.co/stabilityyai/sd-vae-ft-ema, 2023.7", + "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1", + "[3] Roman Bachmann, Jesse Allardice, David Mizrahi, Enrico Fini, Oğuzhan Fatih Kar, Elmira Amirloo, Alaaeldin El-Nouby, Amir Zamir, and Afshin Dehghan. Flextok: Resampling images into 1d token sequences of flexible length. arXiv preprint arXiv:2502.13967, 2025. 3", + "[4] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22669-22679, 2023. 7", + "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. 1", + "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 4, 5, 1", + "[7] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. 
Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 2, 7, 3", + "[8] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2, 7", + "[9] Hao Chen, Ze Wang, Xiang Li, Xineng Sun, Fangyi Chen, Jiang Liu, Jindong Wang, Bhiksha Raj, Zicheng Liu, and Emad Barsoum. Softvq-vae: Efficient 1-dimensional continuous tokenizer. arXiv preprint arXiv:2412.10958, 2024. 3", + "[10] Hao Chen, Yujin Han, Fangyi Chen, Xiang Li, Yidong Wang, Jindong Wang, Ze Wang, Zicheng Liu, Difan Zou, and Bhiksha Raj. Masked autoencoders are effective tokenizers for diffusion models. arXiv preprint arXiv:2502.03444, 2025. 3", + "[11] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pre" + ], + "bbox": [ + 93, + 227, + 482, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "training from pixels. In International conference on machine learning, pages 1691-1703. PMLR, 2020. 3", + "[12] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1", + "[13] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 4, 5", + "[14] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1", + "[15] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 1, 2, 4, 5, 7", + "[16] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 8", + "[17] Christopher Fifty, Ronald G Junkins, Dennis Duan, Aniketh Iger, Jerry W Liu, Ehsan Amid, Sebastian Thrun, and Christopher Ré. Restructuring vector quantization with the rotation trick. arXiv preprint arXiv:2410.06424, 2024. 2", + "[18] Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. 3", + "[19] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1", + "[20] Alexander Hagiéle, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 1", + "[21] Philippe Hansen-Estruch, David Yan, Ching-Yao Chung, Orr Zohar, Jialiang Wang, Tingbo Hou, Tao Xu, Sriram Vishwanath, Peter Vajda, and Xinlei Chen. Learnings from scaling visual tokenizers for reconstruction and generation. arXiv preprint arXiv:2501.09755, 2025. 1, 4", + "[22] Shaozhe Hao, Xuantong Liu, Xianbiao Qi, Shihao Zhao, Bojia Zi, Rong Xiao, Kai Han, and Kwan-Yee K Wong. Bigrr: Harnessing binary latent codes for image generation and improved visual representation capabilities. arXiv preprint arXiv:2410.14672, 2024. 7", + "[23] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 
2, 3", + "[24] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilib-" + ], + "bbox": [ + 516, + 92, + 903, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "rium. Advances in neural information processing systems, 30, 2017. 3", + "[25] Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395, 2024. 1", + "[26] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1125-1134, 2017. 5", + "[27] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 5", + "[28] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2, 4", + "[29] Diederik P Kingma, Max Welling, et al. An introduction to variational autoencoders. Foundations and Trends® in Machine Learning, 12(4):307-392, 2019. 4", + "[30] Dan Kondratyuk, Lijun Yu, Xiuye Gu, Jose Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Joshua V. Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation. 
In Proceedings of the 41st International Conference on Machine Learning, 2024. 1", + "[31] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 1", + "[32] Yann LeCun, Yoshua Bengio, et al. Convolutional networks for images, speech, and time series. The handbook of brain theory and neural networks, 3361(10):1995, 1995. 4", + "[33] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11523-11532, 2022. 2, 7", + "[34] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730-19742. PMLR, 2023. 4, 5, 1", + "[35] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024. 7", + "[36] Xiang Li, Kai Qiu, Hao Chen, Jason Kuen, Jiquiang Gu, Bhiksha Raj, and Zhe Lin. Imagefolder: Autoregres" + ], + "bbox": [ + 91, + 92, + 482, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "sive image generation with folded tokens. arXiv preprint arXiv:2410.01756, 2024. 2, 3, 7, 4", + "[37] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 1", + "[38] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 
2", + "[39] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 2", + "[40] Zhuoyan Luo, Fengyuan Shi, Yixiao Ge, Yujiu Yang, Limin Wang, and Ying Shan. Open-magvit2: An open-source project toward democratizing auto-regressive visual generation. arXiv preprint arXiv:2409.04410, 2024. 2, 4, 7", + "[41] Chuofan Ma, Yi Jiang, Junfeng Wu, Jihan Yang, Xin Yu, Zehuan Yuan, Bingyue Peng, and Xiaojuan Qi. Unitok: A unified tokenizer for visual generation and understanding. arXiv preprint arXiv:2502.20321, 2025. 3", + "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 7", + "[43] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 2, 4, 8, 3", + "[44] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 7, 1", + "[45] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 1", + "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 
2, 8", + "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 7, 1", + "[48] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 3" + ], + "bbox": [ + 516, + 92, + 906, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[49] Fengyuan Shi, Zhuoyan Luo, Yixiao Ge, Yujiu Yang, Ying Shan, and Limin Wang. Taming scalable visual tokenizer for autoregressive image generation. arXiv preprint arXiv:2412.02692, 2024. 2, 7", + "[50] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. 2024. 1, 2, 3, 4, 5, 7", + "[51] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1", + "[52] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 1", + "[53] Keyu Tian, Yi Jiang, Zehuan Yuan, BINGYUE PENG, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 
2, 7, 1, 3", + "[54] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 1, 3", + "[55] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 1", + "[56] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2", + "[57] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 4", + "[58] Hanyu Wang, Saksham Suri, Yixuan Ren, Hao Chen, and Abhinav Shrivastava. Larp: Tokenizing videos with a learned autoregressive generative prior. In ICLR, 2025. 2", + "[59] Luting Wang, Yang Zhao, Zijian Zhang, Jiashi Feng, Si Liu, and Bingyi Kang. Image understanding makes for a good tokenizer for image generation. arXiv preprint arXiv:2411.04406, 2024. 3", + "[60] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2", + "[61] Mark Weber, Lijun Yu, Qihang Yu, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. Maskbit: Embedding-free image generation via bit tokens. arXiv preprint arXiv:2409.16211, 2024. 2", + "[62] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024. 
1" + ], + "bbox": [ + 91, + 90, + 480, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 3", + "[64] Wanghan Xu, Xiaoyu Yue, Zidong Wang, Yao Teng, Wenlong Zhang, Xihui Liu, Luping Zhou, Wanli Ouyang, and Lei Bai. Exploring representation-aligned latent space for better generation. arXiv preprint arXiv:2502.00359, 2025.", + "[65] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 4", + "[66] Jiahui Yu, Xin Li, Jing Yu Koh, Han Zhang, Ruoming Pang, James Qin, Alexander Ku, Yuanzhong Xu, Jason Baldridge, and Yonghui Wu. Vector-quantized image modeling with improved vqgan. arXiv preprint arXiv:2110.04627, 2021. 1, 2, 5, 7", + "[67] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10459-10469, 2023. 5, 1", + "[68] Lijun Yu, Yong Cheng, Zhiruo Wang, Vivek Kumar, Wolfgang Macherey, Yanping Huang, David Ross, Irfan Essa, Yonatan Bisk, Ming-Hsuan Yang, et al. Spae: Semantic pyramid autoencoder for multimodal generation with frozen llms. Advances in Neural Information Processing Systems, 36:52692-52704, 2023. 3", + "[69] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 
1, 2, 4, 5", + "[70] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024. 2, 7, 3", + "[71] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 3, 7, 8, 2", + "[72] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 2, 8", + "[73] Baoquan Zhang, Huaibin Wang, Chuyao Luo, Xutao Li, Guotao Liang, Yunming Ye, Xiaochen Qi, and Yao He. Codebook transfer with part-of-speech for vector-quantized image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7757–7766, 2024. 3", + "[74] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5" + ], + "bbox": [ + 516, + 90, + 905, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[75] Yue Zhao, Yuanjun Xiong, and Philipp Krajhenbuhl. Image and video tokenization with binary spherical quantization. arXiv preprint arXiv:2406.07548, 2024. 2", + "[76] Lei Zhu, Fangyun Wei, Yanye Lu, and Dong Chen. Scaling the codebook size of vqgan to 100,000 with a utilization rate of $99\\%$ . arXiv preprint arXiv:2406.11837, 2024. 2, 3, 4", + "[77] Yongxin Zhu, Bocheng Li, Hang Zhang, Xin Li, Linli Xu, and Lidong Bing. Stabilize the latent space for image autoregressive modeling: A unified perspective. arXiv preprint arXiv:2410.12490, 2024. 
3" + ], + "bbox": [ + 89, + 90, + 482, + 232 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation", + "text_level": 1, + "bbox": [ + 199, + 85, + 799, + 130 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Supplementary Material", + "bbox": [ + 380, + 141, + 614, + 162 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. Limitations and Future Work", + "text_level": 1, + "bbox": [ + 89, + 178, + 367, + 194 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This study primarily focuses on scaling tokenizers for class-conditional image generation. While we have demonstrated the effectiveness of GigaTok for downstream class-conditional generation, expanding the scope to include text-conditional image generation or video generation remains an open avenue for future work. Additionally, unlike CNN-based 2D tokenizers, 1D Transformer-based tokenizers are not directly applicable to multiple resolutions without additional training adjustments. This challenge presents an important direction for further exploration. Besides scaling the model sizes of tokenizers, the effect of scaling training data, codebook dimension and codebook size for downstream autoregressive generation are left for future research.", + "bbox": [ + 89, + 204, + 483, + 401 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B. Configurations for AR models", + "text_level": 1, + "bbox": [ + 89, + 416, + 370, + 431 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/6bae227554a03c1775492f7d81adf5e2f053dba39d655da7c38297c685182938.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SizeParams.BlocksHeadsDim.
B111M1212768
L343M24161024
XL775M36201280
XXL1.4B48241536
", + "bbox": [ + 148, + 454, + 426, + 541 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 8. Architectures of the LLamaGen models in our experiments.", + "bbox": [ + 89, + 551, + 482, + 579 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "AR model training. We scale up the training of downstream Llama-style [50, 54] AR models to compare generation performance with other models. For model training, we use WSD learning rate scheduler [20, 25] with $1 \\times 10^{-4}$ base learning rate, 0.2 decay ratio and 1 epoch warm-up. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We use a batch size of 256 for training the B, L and XL models and a 512 batch size for training the XXL model. Our AR models are trained for 300 epochs on the $256 \\times 256$ ImageNet training set.", + "bbox": [ + 89, + 598, + 482, + 748 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "CFG for gFID. Since gFID of GPT models can be largely affected by classifier free guidance (CFG) [47, 50] and often has an optimal CFG [50], for fair comparison, we search the optimal CFG using zero-order search with a step of 0.25 and use the lowest gFID as the final value. For AR Probing, we use constant CFG scheduling for simplicity. For system-level comparison, we use a step function for CFG scheduling inspired by [31]. Specifically, the AR models predict the first $18\\%$ tokens without CFG, i.e., $\\mathrm{CFG} = 1$ for better diversity, and use CFG for the remaining tokens", + "bbox": [ + 89, + 750, + 482, + 901 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg", + "image_caption": [ + "Figure 10. The architecture of GigaTok with Q-Former." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 178, + 906, + 393 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg", + "image_caption": [ + "Figure 11. Initialization of 1D queries in Q-Former modules." + ], + "image_footnote": [], + "bbox": [ + 516, + 435, + 903, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "for better visual quality. Interestingly, we find that the 1.4B LlamaGen model achieves the best gFID without CFG.", + "bbox": [ + 511, + 604, + 905, + 635 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C. Detailed GigaTok Implementation", + "text_level": 1, + "bbox": [ + 511, + 648, + 828, + 666 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Please refer to Tab. 9 for training details.", + "bbox": [ + 511, + 674, + 782, + 688 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Q-Fomrer in GigaTok. GigaTok utilizes Q-Former [6, 34] to build 1D tokenizers, as shown in Fig. 10. For Q-Former encoder in GigaTok, we initialize the 1D queries initialized from the 2D input features of the CNN encoder using a multi-level average pooling strategy, as shown in Fig. 11. Specifically, for the same 2D input features, we spatially divide them with different granularity at different levels, and perform average pooling for every divided region at each level. The pooled features are flattened and concatenated from level 0 to the last level. Therefore, a 1D token sequence with $2^{L}$ length can be initialized with $L$ levels from 2D input features. At the decoding stage, the 2D queries are all initialized from the first 1D latent feature.", + "bbox": [ + 511, + 688, + 905, + 883 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Entropy Loss for VQ Tokenizers. 
While entropy loss [67,", + "bbox": [ + 511, + 885, + 903, + 901 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/a2be56996275f976cf1f457e4c8dd1af6d6f1e9a7b132e726a12ef2c4f015402.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ConfigurationS-SS-BS-LB-LXL-XXL
Q-Former Encoder depth6661236
Q-Former Encoder heads8881220
Q-Former Encoder dim.5125125127681280
Q-Former Decoder depth612242448
Q-Former Decoder heads.812161624
Q-Former Decoder dim.512768102410241536
Params (M)1362325336222896
Codebook size16384
Codebook dimension8
#Tokens256
Training epochs100200200200300
Batch size128128256256256
Alignment Layer l3
Learning rate scheduleCosine Decay
Base learning rate\\( 1 \\times 10^{-4} \\)
Minimum learning rate\\( 1 \\times 10^{-5} \\)
LR warm-up iterations00005000
OptimizerAdamW[39]
Opt. momentum\\( \\beta_1 = 0.9, \\beta_2 = 0.95 \\)
Entropy Loss weight0000\\( 5 \\times 10^{-3} \\)
", + "bbox": [ + 263, + 88, + 736, + 436 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 9. GigaTok configuration and default training details", + "bbox": [ + 323, + 445, + 676, + 460 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "[69] is discussed for LFQ [69], its application to VQ tokenizers is less commonly explained. We provide a detailed derivation of the entropy loss specifically for VQ tokenizers. Mathematically, for quantization process from continuous vector $\\mathbf{z} \\in \\mathbb{R}^D$ to quantized vector $\\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D$ where $\\mathbf{c}_i$ is the $i$ -th codebook vector from codebook $\\mathbf{C} \\in \\mathbb{R}^{N \\times D}$ , we assume this process is statistical and follows the following distribution:", + "bbox": [ + 89, + 484, + 482, + 607 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\np (\\hat {\\mathbf {z}} = \\mathbf {c} _ {i} | \\mathbf {z}) \\triangleq \\operatorname {s o f t m a x} (- l _ {2} (\\mathbf {z}, \\mathbf {C})) [ i ] \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 618, + 482, + 638 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $l_{2}(\\mathbf{z},\\mathbf{C})\\in \\mathbb{R}^{N}$ is the $L_{2}$ distance between $\\mathbf{z}$ and all the codebook vectors. Then, minimization of the quantization error can be partially achieved by minimizing the expectation of entropy $\\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}} |\\mathbf{z})]$ , which can be understood as maximizing the prediction confidence for $p(\\hat{\\mathbf{z}} |\\mathbf{z})$ . To encourage higher codebook utilization, we aim to make the average appearance probability of codebook vectors more uniform. This is achieved by maximizing the entropy $H(\\hat{\\mathbf{z}})$ . 
Therefore, the optimization of the two entropy terms leads to the final entropy loss equation:", + "bbox": [ + 89, + 647, + 483, + 800 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {e n t r o p y}} = \\mathbb {E} _ {\\mathbf {z}} [ H (\\hat {\\mathbf {z}} | \\mathbf {z}) ] - H (\\hat {\\mathbf {z}}) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 811, + 482, + 830 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In practice, to calculate $H(\\hat{\\mathbf{z}})$ , we estimate $p(\\hat{\\mathbf{z}} = \\mathbf{c}_i)$ by $p(\\hat{\\mathbf{z}} = \\mathbf{c}_i) = \\mathbb{E}_{\\mathbf{z}}[p(\\hat{\\mathbf{z}} = \\mathbf{c}_i|\\mathbf{z})]$ . Note that entropy loss is not our contribution. We only provide a detailed definition of entropy loss in VQ scenarios for better understanding.", + "bbox": [ + 89, + 839, + 483, + 902 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Additional implementation details. To stabilize the training of our tokenizer with a hybrid architecture, we initially use a shortcut feature reconstruction trick at the first 15k iterations of the tokenizer training. But we later found that this trick can be replaced with a simple 1-epoch learning rate warmup combined with entropy loss [15, 69]. Specifically for this trick, we additionally give the output feature of the CNN encoder to the CNN decoder directly to be trained for reconstruction, and also align the output feature of the Transformer decoder to the output feature of the CNN encoder, besides the original training objectives. Note that this strategy is complex and can even hinder performance for XL-XXL tokenizers. We recommend using the learning rate warmup combined with entropy loss [15, 69] instead, for both XL-XXL tokenizer and the smaller ones. Additionally, we utilize the rotation trick [17] for all tokenizers, though we observe its effect on performance to be limited for our tokenizer. 
The implementation of the semantic regularization is partially inspired by REPA [71].", + "bbox": [ + 511, + 484, + 906, + 773 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D. Full Evaluation Results and Analysis", + "text_level": 1, + "bbox": [ + 511, + 784, + 852, + 801 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Here we present the full evaluation results for the tokenizers and downstream AR models, as summarized in Tab. 10. We observe that scaling up visual tokenizers consistently improves the reconstruction quality across multiple metrics. Interestingly, for the 1.4B AR model, the lowest gFID is obtained without applying any CFG. This phenomenon is", + "bbox": [ + 511, + 810, + 908, + 902 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/c430d54c28f1d7deb48c5ced503e66bfc7c3f7e549a1d36977dcd91509812eac.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TokenizerParam. rFID↓LPIPS↓PSNR↑SSIM↑AR ModelParam. gFID↓Acc.↑IS↑Precision↑Recall↑
LlamaGen-Tok. [50]72M2.19-20.790.675LlamaGen-B [50]111M5.46-193.610.830.45
GigaTok-S-S136M1.010.222620.740.670LlamaGen-B (1d) [50]111M4.0562.6240.610.810.51
GigaTok-S-B232M0.890.212120.930.677LlamaGen-B (1d) [50]111M3.8362.9233.310.830.51
GigaTok-B-L622M0.810.205921.210.685LlamaGen-B (1d) [50]111M3.2667.6221.020.810.56
LlamaGen-XXL (1d) [50]1.4B2.03*69.4238.520.800.63
GigaTok-B-L622M0.51‡0.20621.320.691LlamaGen-B (1d) [50]111M3.3367.7265.430.800.56
GigaTok-XL-XXL2.9B0.790.194721.650.699LlamaGen-B (1d) [50]111M3.1572.0224.280.820.55
LlamaGen-XXL (1d) [50]1.4B1.98*74.0256.760.810.62
", + "bbox": [ + 91, + 88, + 946, + 234 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 10. Full results for our tokenizers and AR models on ImageNet ${256} \\times {256}$ . For gFID,we present the lowest value between w/ or w/o CFG scenarios. $\\ddagger$ : Using frozen DINO [7] for discriminator,which largely improves rFID. $\\star$ : Without classifier-free-guidance.", + "bbox": [ + 89, + 246, + 906, + 275 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "also observed in the concurrent work FlexTok [3], despite significant differences between GigaTok and FlexTok. We hypothesize that semantic regularization might be the primary contributing factor for this phenomenon.", + "bbox": [ + 88, + 301, + 480, + 359 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Discussion on Scaling and Enhancing the Discriminator. Recently, VAR [53], ImageFolder [36], and the concurrent work UniTok [41] have begun leveraging DINO-based discriminators [7, 43] to enhance tokenizer training, achieving impressive improvements in rFID scores. We have also experimented with the same DINO discriminator configuration as VAR. Our results indicate that although rFID scores improve, the downstream generation quality improvements are less significant, as detailed in Tab. 10. Furthermore, when applying the DINO discriminator to XL-XXL tokenizers, we observed that adversarial training frequently encounters instability. Specifically, a strong discriminator quickly learns to distinguish reconstructed samples, diminishing the benefits of adversarial training and leading to blurry artifacts. We leave further exploration of discriminator scaling and enhancement strategies for future work.", + "bbox": [ + 89, + 362, + 482, + 603 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E. 
Training Tokenizers for More Iterations", + "text_level": 1, + "bbox": [ + 89, + 618, + 452, + 633 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "While we largely resolve the reconstruction vs. generation dilemma regarding tokenizer model scaling, this challenge persists for tokenizer training duration scaling. To illustrate this phenomenon, we train five S-S tokenizers ranging from 40 to 120 epochs using a cosine learning rate scheduler, as detailed in Tab. 9. The results are presented in Fig. 12.", + "bbox": [ + 89, + 643, + 482, + 748 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "When extending tokenizer training iterations, reconstruction quality consistently improves. However, downstream generation quality initially improves but subsequently degrades with further increases in tokenizer training duration. Additionally, the validation loss of AR probing continuously rises with longer tokenizer training, regardless of semantic regularization. This trend suggests an increasing complexity in the tokenizer's latent space as the training duration extends.", + "bbox": [ + 89, + 750, + 482, + 883 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We hypothesize that data scaling may alleviate this is-", + "bbox": [ + 109, + 885, + 482, + 900 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "sue, and leave it for future exploration. In practice, allocating computational resources toward model scaling rather than extended training duration may yield better tokenizer performance.", + "bbox": [ + 511, + 301, + 906, + 362 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "F. Linear Probing Accuracy of Tokenizers", + "text_level": 1, + "bbox": [ + 511, + 378, + 870, + 395 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We show that the linear probing accuracy of the tokenizer encoders may not necessarily indicate the performance of downstream AR models. 
We utilize the intermediate checkpoints during the training of B-L and XL-XXL tokenizers for evaluation. As shown in Fig. 13, the XL-XXL tokenizer encoder presents an overfitting trend in terms of tokenizer encoder linear probing accuracy. However, this overfitting trend is not reflected in AR Probing linear probing accuracy or gFID. Therefore, the linear probing accuracy of the tokenizer encoders may not be a good indicator of downstream model performance. Similarly, a concurrent work UniTok [41], also points out that the performance of the tokenizer encoder in terms of zero-shot ImageNet classification accuracy may not necessarily reflect the visual understanding ability of downstream LLMs trained on the tokenizer.", + "bbox": [ + 511, + 404, + 906, + 630 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The abnormality for large tokenizers reveals that the linear probing accuracy of the tokenizer is not necessarily a good indicator for downstream generation models. Since we care more about the representation learning for downstream models than for the tokenizers, using AR Probing as a direct evaluating method is better than indirect tokenizer linear probing accuracy.", + "bbox": [ + 511, + 631, + 905, + 737 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "G. More Discussions About Related Work", + "text_level": 1, + "bbox": [ + 511, + 753, + 867, + 768 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "TiTok [70] explores the use of 1D Transformer-based tokenizers under a high compression rate setting. TiTok seminally explores the model scaling of visual tokenizers and uses larger tokenizers for higher compression rate. However, the reconstruction vs. generation dilemma for scaling tokenizers is not solved in TiTok. 
As a result, the best generation model in TiTok is still trained on its smallest tokenizer variant.", + "bbox": [ + 511, + 779, + 905, + 898 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg", + "image_caption": [ + "Figure 12. Training duration scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." + ], + "image_footnote": [], + "bbox": [ + 96, + 111, + 236, + 229 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 248, + 111, + 395, + 229 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 400, + 111, + 578, + 229 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 583, + 111, + 730, + 229 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 733, + 111, + 901, + 229 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 311, + 377, + 325 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg", + "image_caption": [ + "Figure 13. 
The linear probing accuracy of tokenizer encoders does not necessarily reflect downstream model performance. As the training proceeds, the XL-XXL tokenizer encoder presents an overfitting trend measured by linear probing accuracy, but downstream model performances consistently improve." + ], + "image_footnote": [], + "bbox": [ + 94, + 332, + 215, + 439 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 332, + 346, + 439 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 332, + 473, + 439 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ViTok [21] is a concurrent work which has explored the effect of model scaling for VAE [28]. ViTok evaluates its VAE models in terms of both reconstruction and downstream diffusion generation performance. While having a very different setting from GigaTok, ViTok similarly finds that asymmetric design is better for VAEs. While ViTok suggests that small encoders are optimal, we point out that in our setting scaling encoders is also beneficial. Notably, the reconstruction vs. generation dilemma for scaling visual tokenizers is not solved in ViTok. We hypothesize that adding semantic regularization may similarly help solve the tokenizer scaling dilemma for VAEs, but leave it for future study.", + "bbox": [ + 89, + 550, + 482, + 731 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "MAGVIT-v2 [69] introduces LFQ to enhance discrete tokenizers. It also introduces the entropy penalty for tokenizer training, which is shown to be important for training large-scale tokenizers in our work. Instead of tokenizer model scaling, MAGVIT-v2 focuses more on scaling the codebook size of tokenizers. 
While codebook dimension and codebook size are important bottlenecks for visual tokenizers, we point out that model size scaling is also an important way for improving visual tokenizers.", + "bbox": [ + 89, + 733, + 482, + 868 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ImageFolder [36] utilizes two branches for image encoding to handle high-level semantic information and low-level", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "visual details respectively. It seminally utilizes semantic alignment to enhance the learned representation of tokenizers.", + "bbox": [ + 511, + 311, + 903, + 354 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VA-VAE [65] tames the reconstruction vs. generation dilemma in increasing latent dimensions for continuous VAE [28, 29]. VA-VAE improves the reconstruction-generation Pareto Frontier by introducing vision foundation model alignment loss. In contrast, we seek continuous improvements in both reconstruction and generation by scaling tokenizers. 
Semantic regularization serves different purposes in the two works.", + "bbox": [ + 511, + 357, + 906, + 479 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "w/o semantic regularization w/ semantic regularization", + "bbox": [ + 326, + 90, + 730, + 108 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_model.json b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..daa1a9e5da360263036b285be57b299b065004f5 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_model.json @@ -0,0 +1,3301 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.2, + 0.131, + 0.8, + 0.177 + ], + "angle": 0, + "content": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.203, + 0.262, + 0.222 + ], + "angle": 0, + "content": "Tianwei Xiong" + }, + { + "type": "text", + "bbox": [ + 0.305, + 0.204, + 0.425, + 0.221 + ], + "angle": 0, + "content": "Jun Hao Liew2" + }, + { + "type": "text", + "bbox": [ + 0.466, + 0.204, + 0.585, + 0.222 + ], + "angle": 0, + "content": "Zilong Huang" + }, + { + "type": "text", + "bbox": [ + 0.629, + 0.204, + 0.727, + 0.222 + ], + "angle": 0, + "content": "Jiashi Feng2" + }, + { + "type": "text", + "bbox": [ + 0.769, + 0.204, + 0.862, + 0.221 + ], + "angle": 0, + "content": "Xihui Liu\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.275, + 0.222, + 0.519, + 0.24 + ], + "angle": 0, + "content": "1The University of Hong Kong" + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.222, + 0.725, + 0.239 + ], + "angle": 0, + "content": "2ByteDance Seed" + }, + { + "type": "text", + "bbox": [ + 0.299, + 0.24, + 0.696, + 0.257 + ], + "angle": 0, + "content": "Project page: https://silentview.github.io/GigaTok/" + }, + { + "type": "title", + 
"bbox": [ + 0.249, + 0.292, + 0.326, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.324, + 0.485, + 0.718 + ], + "angle": 0, + "content": "In autoregressive (AR) image generation, visual tokenizers compress images into compact discrete latent tokens, enabling efficient training of downstream autoregressive models for visual generation via next-token prediction. While scaling visual tokenizers improves image reconstruction quality, it often degrades downstream generation quality—a challenge not adequately addressed in existing literature. To address this, we introduce GigaTok, the first approach to simultaneously improve image reconstruction, generation, and representation learning when scaling visual tokenizers. We identify the growing complexity of latent space as the key factor behind the reconstruction vs. generation dilemma. To mitigate this, we propose semantic regularization, which aligns tokenizer features with semantically consistent features from a pre-trained visual encoder. This constraint prevents excessive latent space complexity during scaling, yielding consistent improvements in both reconstruction and downstream autoregressive generation. Building on semantic regularization, we explore three key practices for scaling tokenizers: (1) using 1D tokenizers for better scalability, (2) prioritizing decoder scaling when expanding both encoder and decoder, and (3) employing entropy loss to stabilize training for billion-scale tokenizers. By scaling to 3 billion parameters, GigaTok achieves state-of-the-art performance in reconstruction, downstream AR generation, and downstream AR representation quality." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.746, + 0.222, + 0.761 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.771, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Autoregressive (AR) language models (LM) have emerged as a promising approach for visual generation [15, 50, 66, 69], driven by their proven scalability [2, 5, 14, 19, 37, 51, 52, 54, 55] and the potential for unified multimodal modeling [12, 45, 62]. The AR image generation framework consists of a visual tokenizer and a downstream AR generator. The tokenizer encodes images into discrete tokens, trained" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.294, + 0.623, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.296, + 0.695, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.296, + 0.758, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.762, + 0.296, + 0.829, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.831, + 0.296, + 0.898, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.372, + 0.623, + 0.495 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.372, + 0.691, + 0.424 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.372, + 0.757, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.761, + 0.372, + 0.828, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.831, + 0.372, + 0.898, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.719, + 0.493, + 0.869, + 0.502 + ], + "angle": 0, + "content": "Better generation with larger tokenizer" + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.517, + 0.907, + 0.587 + ], + "angle": 0, + "content": "Figure 1. Reconstruction vs. 
generation dilemma: Naively scaling visual tokenizers achieves better reconstruction but degrades downstream autoregressive (AR) generation. In contrast, GigaTok achieves better performance for both reconstruction and generation as tokenizers scale up." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.608, + 0.906, + 0.699 + ], + "angle": 0, + "content": "with image reconstruction supervision, while the AR generator models the distribution of these discrete tokens through next-token prediction. The image tokenizer plays a pivotal role in AR visual generation, providing a compact and expressive latent space that enables effective generative modeling by downstream AR models." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.701, + 0.907, + 0.883 + ], + "angle": 0, + "content": "Despite its pivotal role, scaling of visual tokenizer is rarely explored in the literature. In fact, unlike the downstream AR models whose scalability has been widely validated [12, 30, 60, 62], scaling the visual tokenizer presents a significant challenge. Specifically, there exists a reconstruction vs. generation dilemma, where scaling tokenizer improves reconstruction fidelity but degrades downstream generation quality, as shown in Fig. 1. This dilemma is also observed in prior works [13, 21]. In this work, we seek to overcome this limitation and explore strategies for effectively scaling tokenizers to enhance both reconstruction and generation performance." + }, + { + "type": "text", + "bbox": [ + 0.534, + 0.886, + 0.905, + 0.902 + ], + "angle": 0, + "content": "To investigate the root cause of this dilemma, we propose" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.059, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.08736v2 [cs.CV] 24 Aug 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.887, + 0.225, + 0.9 + ], + "angle": 0, + "content": "† Corresponding Author." 
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.096, + 0.091, + 0.904, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.098, + 0.314, + 0.898, + 0.329 + ], + "angle": 0, + "content": "Figure 2. The 2.9B GigaTok achieves SOTA autoregressive image generation with a 1.4B AR model on ImageNet \\(256\\times 256\\) resolution." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.356, + 0.483, + 0.476 + ], + "angle": 0, + "content": "an AR probing scheme that trains a lightweight downstream generative AR model to monitor the tokenizer's training process. Surprisingly, we find that as tokenizers scale, the downstream AR model struggles more to learn the resulting token distribution, as evidenced by the increasing AR generation loss. This suggests that the larger tokenizers produce a more complex token space, making it increasingly difficult for AR models to learn effectively." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.478, + 0.483, + 0.78 + ], + "angle": 0, + "content": "To address this challenge, we introduce pre-trained visual representation models (e.g. DINOv2 [43]) to regularize tokenizers. Specifically, we leverage a semantic regularization loss during tokenizer training, encouraging high similarity between tokenizer features and the pre-trained model features. Such regularization helps constrain the latent space complexity, preventing the tokenizer from learning overly complicated latent token dependencies that hinder downstream AR generative modeling. 
Moreover, we design a vector-quantized (VQ) tokenizer with a hybrid CNN-Transformer architecture as the backbone, suitable for both 1D and 2D tokenizers, and explore best practices for scaling tokenizers: (1) 1D tokenizers exhibit better scalability compared to 2D tokenizers; (2) Asymmetric model scaling, prioritizing decoder scaling over encoder scaling, proves effective; (3) Entropy loss [69] becomes crucial for convergence when training tokenizers with billion-level parameters. With our semantic regularization and three key scaling strategies, we effectively scale GigaTok to 3 billion parameters, overcoming the reconstruction vs. generation dilemma." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.78, + 0.403, + 0.794 + ], + "angle": 0, + "content": "We summarize our contributions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.796, + 0.483, + 0.871 + ], + "angle": 0, + "content": "- We identify that the reconstruction vs. generation dilemma in tokenizer scaling stems from increased latent space complexity in larger tokenizers. To address this, we propose semantic regularization, effectively mitigating the dilemma and enabling tokenizer scaling." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "- We explore best practices for scaling tokenizers, including 1D tokenizers with hybrid CNN-Transformer archi" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.796, + 0.484, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.356, + 0.907, + 0.385 + ], + "angle": 0, + "content": "tecture, asymmetric encoder-decoder scaling, and entropy loss for billion-scale tokenizers." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.386, + 0.907, + 0.432 + ], + "angle": 0, + "content": "- Our GigaTok is the first tokenizer scaled to 3B, achieving state-of-the-art reconstruction, downstream AR generation, and downstream AR representation on ImageNet." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.449, + 0.655, + 0.465 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.476, + 0.907, + 0.717 + ], + "angle": 0, + "content": "Image tokenizers. Image tokenizers map image inputs into discrete [15, 56, 66] or continuous [28] tokens which can be modeled by downstream generative models. For discrete tokenizers, Vector Quantization (VQ) [15, 56, 66] is dominantly adopted. Recently, new quantization methods [49, 69, 75, 76] have also been proposed for better scaling of codebook size. However, how to properly scale up tokenizer models is insufficiently studied in existing literature. ViT-VQGAN [66] and TiTok [70] utilize transformer architecture to enable convenient scaling of tokenizers, but end up training their best generative models on smaller tokenizer versions. A concurrent work, ViTok [76], suggests de-prioritizing VAE scaling due to its less predictable effect for downstream diffusion models. We observe a similar reconstruction vs. generation dilemma in scaling discrete tokenizers, and provide our analysis and solution to it." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.719, + 0.909, + 0.854 + ], + "angle": 0, + "content": "Autoregressive Visual Generation. Autoregressive visual generative models [33, 38, 40, 49, 50, 56, 58, 60, 66] follow the next-token-prediction (NTP) approach of LLMs, enabling the leverage of advancements in LLMs and simplifying the path to unified multi-modal generation. Other variants utilize visual-specific paradigms such as mask image modeling [8, 61, 69, 70] and next-scale-prediction [36, 53] for better performance. We reveal that scaling tokenizers helps NTP AR models to be comparable to these variants." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Semantic Guidance for Visual Generative Models and Tokenizers. 
The guidance from visual foundation models [7, 23, 43, 46, 72] has been used to improve training" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.258 + ], + "angle": 0, + "content": "convergence speed and quality [65, 71] of visual generative models, as well as enhancing representation quality or downstream performance of visual tokenizers [9, 10, 18, 36, 41, 59, 63–65, 68, 73, 76, 77]. REPA [71] presents impressive performance improvements brought by a simple representation alignment strategy, and recently, VA-VAE [65] shows the significant benefits of semantic guidance to the reconstruction-generation Pareto Frontier of VAEs. Different from existing work, GigaTok novelly reveals the critical role of semantic regularization for resolving the reconstruction vs. generation dilemma in scaling visual tokenizers." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.271, + 0.21, + 0.287 + ], + "angle": 0, + "content": "3. Pilot Study" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.296, + 0.484, + 0.372 + ], + "angle": 0, + "content": "We first introduce AR Probing as a proxy to effectively monitor the tokenizer's effectiveness for downstream generation (Sec 3.1), followed by a pilot experiment that investigates the reconstruction vs. generation challenges when naively scaling visual tokenizers (Sec 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.381, + 0.415, + 0.396 + ], + "angle": 0, + "content": "3.1. AR Probing for Tokenizer Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.403, + 0.484, + 0.584 + ], + "angle": 0, + "content": "In autoregressive visual generation, the training of the tokenizer and downstream AR model are performed in separate stages. In the first stage, a visual tokenizer is trained to compress images into discrete tokens, optimized with reconstruction objective. In the second stage, the downstream generative model is trained based on the discrete tokens from the pre-trained tokenizer. 
However, a tokenizer that performs well in terms of reconstruction fidelity in the first stage may not necessarily lead to better performance for downstream generative models. Thus, it is crucial to evaluate the effectiveness of the trained tokenizers for downstream generation alongside its reconstruction quality." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.585, + 0.484, + 0.719 + ], + "angle": 0, + "content": "Despite its importance, assessing how a tokenizer influences downstream generation models can be computationally expensive. For example, sufficiently training a 343M parameter downstream AR generator takes 170 hours on 64 V100 GPUs. To address this challenge, we introduce AR Probing, inspired by Linear Probing in representation learning literature [11, 23]. The key idea is to use the performance of a small AR model as a proxy to reflect the performance trends of large-scale AR models." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.484, + 0.87 + ], + "angle": 0, + "content": "Specifically, we use the tokenizer to train a small Llama-style model [50, 54] (111M parameters) for 50 epochs, and evaluate its gFID [24], validation loss, and linear probing accuracy [11, 23] for a fair comparison between different tokenizers. Training the proposed AR Probing model for evaluating tokenizers is \\(10 \\times\\) more efficient than training the original 343M downstream AR model. Our experiments in Sec. 5.1 (Fig. 6) demonstrate that the trends observed with AR Probing align with the performance of the large-scale AR models after sufficient training." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "gFID. 
The generation FID [24] of AR probing indicates the overall image generation performance of the two-stage" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.092, + 0.645, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.092, + 0.775, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.781, + 0.092, + 0.903, + 0.173 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.187, + 0.906, + 0.271 + ], + "angle": 0, + "content": "Figure 3. Scaling trend for vanilla 1D tokenizers. As the model size increases, the reconstruction quality of vanilla tokenizers improves but the downstream AR Probing gFID consistently degrades. The increasing AR Probing validation loss indicates that scaling vanilla tokenizers results in a more complex latent space, making it difficult for AR models to learn effectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.285, + 0.905, + 0.345 + ], + "angle": 0, + "content": "framework. It reflects both the reconstruction fidelity of the tokenizer and how well the downstream AR probing model can learn the dependency of the visual tokens (i.e., learnability of the token distribution)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.346, + 0.906, + 0.512 + ], + "angle": 0, + "content": "Validation loss. We use the validation loss of the AR probing model to measure the learnability of the latent tokens as a disentangled factor. The validation loss is calculated as an average of the token-wise cross-entropy loss in the next-token-prediction paradigm on ImageNet [48] 50k validation set. With the same vocabulary size, the same number and structure of visual tokens, and the same AR probing model, larger validation loss indicates a latent space that is more difficult for the AR model to learn. Therefore, we use validation loss to reflect the latent space complexity and learnability for AR models." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.512, + 0.906, + 0.634 + ], + "angle": 0, + "content": "Linear probing accuracy. Beyond visual generation quality, we also investigate whether scaling tokenizers will lead to better visual representations of AR models, which may provide inspiration for future research in unified multimodal understanding and generation with AR models. To assess the representation quality, we adopt the standard practice [11, 23] of linear probing accuracy using features from the middle Transformer layer of the AR probing model." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.643, + 0.884, + 0.659 + ], + "angle": 0, + "content": "3.2. Naively Scaling Tokenizers Does Not Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.666, + 0.906, + 0.877 + ], + "angle": 0, + "content": "To study the challenges when naively scaling visual tokenizers, we train three vector-quantized tokenizers1 on ImageNet [48] at \\(256 \\times 256\\) resolution with increasing model sizes. As shown in Fig. 3, as the tokenizer size increases, although the reconstruction quality (rFID) consistently improves, the AR generation performance (gFID) significantly degrades. This highlights the reconstruction vs. generation dilemma in tokenizer scaling. Moreover, we observe that the validation loss of AR Probing consistently increases as the tokenizers scale, indicating that larger tokenizers lead to complicated token dependencies that are more difficult for the AR model to learn. This observation motivates us to design the semantic regularization to constrain the latent space complexity of the tokenizer and therefore break the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.514, + 0.888, + 0.792, + 0.9 + ], + "angle": 0, + "content": "The tokenizer architectures are described in Sec. 4.1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.423, + 0.106 + ], + "angle": 0, + "content": "reconstruction vs. generation dilemma in Sec. 4.2." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.12, + 0.191, + 0.138 + ], + "angle": 0, + "content": "4. GigaTok" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.145, + 0.484, + 0.298 + ], + "angle": 0, + "content": "In this section, we introduce the model structure and training strategies for our scalable visual tokenizer, GigaTok. In Sec. 4.1, we present a tokenizer backbone supporting 1D and 2D token structures, and discuss the asymmetric scaling strategies for the encoder and decoder. In Sec. 4.2, we introduce semantic regularization, which breaks the reconstruction vs. generation dilemma by regularizing the complexity of the latent space with pre-trained visual representations. In Sec. 4.3, we show how entropy loss [69] facilitates the convergence of billion-scale tokenizers." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.306, + 0.226, + 0.321 + ], + "angle": 0, + "content": "4.1. Architecture" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.328, + 0.484, + 0.57 + ], + "angle": 0, + "content": "The CNN [32] architectures have been the dominant choices for image tokenizers [15, 40, 69, 76] due to their effectiveness in capturing fine-grained local details. Yet, Transformers are more scalable architectures with less inductive bias. Thus, we design a vector quantized tokenizer backbone with a hybrid architecture that combines CNN [15, 32] and Transformer [6, 13, 57] for encoder and decoder (Fig. 4). Specifically, our encoder consists of a series of CNN blocks that progressively downsamples the input image by a factor of \\( p \\), followed by Transformer layers and a vector quantizer to produce discrete latent codes. Similarly, our decoder consists of multiple Transformer layers, followed by CNN decoders which upsamples the features to obtain the reconstructed image2. Our tokenizer architecture can be adapted to both 1D and 2D tokenizers by using different Transformer designs introduced in the next two paragraphs." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.572, + 0.484, + 0.634 + ], + "angle": 0, + "content": "2D tokenizers with ViT. For 2D tokenizers, the Transformers in both tokenizer encoder and decoder are implemented by ViT [13] architecture. 2D structures of the latent features and tokens are preserved throughout the tokenizer." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.637, + 0.484, + 0.789 + ], + "angle": 0, + "content": "1D tokenizers with Q-Former. For 1D tokenizers, we implement the Transformer modules in both encoder and decoder as Q-Formers [6, 34]. The Q-Former in the encoder employs 1D queries, transforming 2D input features into 1D latent tokens. The Q-Former in the decoder utilizes 2D queries to transform 1D latent tokens back to 2D features, which are then passed to the CNN decoder to reconstruct images. The 1D tokenizers remove the 2D inductive bias and demonstrate better scalability than 2D tokenizers in our experiments (Sec. 5.5)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.791, + 0.484, + 0.868 + ], + "angle": 0, + "content": "Asymmetric encoder-decoder scaling. Since the decoder faces the more challenging task of reconstructing images from lossy latent codes, we adopt an asymmetric design for more efficient parameter allocation. Specifically, we scale both the encoder and decoder, while ensuring that" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.089, + 0.907, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.288, + 0.907, + 0.301 + ], + "angle": 0, + "content": "Figure 4. GigaTok architecture and semantic regularization." + }, + { + "type": "image_footnote", + "bbox": [ + 0.513, + 0.302, + 0.907, + 0.358 + ], + "angle": 0, + "content": "Top: We use a hybrid CNN-Transformer design for our visual tokenizer. The transformer layers are implemented with ViT for 2D tokenizer and Q-Former for 1D tokenizer. 
Bottom: We use a frozen DINOv2 [43] image encoder for semantic regularization." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.376, + 0.906, + 0.438 + ], + "angle": 0, + "content": "the decoders are always larger than the encoders. In practice, we maintain the same and fixed size for the CNN encoder/decoder and only increase the depth and width of the Transformer modules for scaling." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.445, + 0.742, + 0.461 + ], + "angle": 0, + "content": "4.2. Semantic Regularization" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.467, + 0.906, + 0.587 + ], + "angle": 0, + "content": "In our pilot study (Sec. 3.2), the latent space complexity significantly increases as the tokenizer scales, which potentially leads to worse downstream AR generation for larger tokenizers. We hypothesize that larger tokenizers tend to capture excessive fine-grained low-level details for better reconstruction, resulting in overly complex latent token distributions, which makes it harder for AR models to learn the token dependencies effectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.588, + 0.906, + 0.708 + ], + "angle": 0, + "content": "To address this, we introduce semantic regularization to guide the tokenizer to encode a more semantically consistent latent space, which is less complex and easier for downstream generative modeling. Specifically, we introduce a simple semantic regularization term alongside the tokenizer training objective. The regularization aligns the intermediate features of the tokenizer decoder with the feature representations extracted from pre-trained frozen DINOv2 [43]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.708, + 0.906, + 0.769 + ], + "angle": 0, + "content": "Mathematically, let \\( f^{\\mathrm{dec},l} \\) be the output feature of the \\( l \\)-th layer of the Transformer decoder, \\( f^{\\mathrm{DINO}} \\) be the semantic features of a pretrained image encoder (here DINOv2-B [43]). 
The semantic regularization can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.58, + 0.78, + 0.907, + 0.821 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {r e g}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sin \\left(f _ {n} ^ {\\mathrm {d e c}, l}, \\phi \\left(f _ {n} ^ {\\mathrm {D I N O}}\\right)\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.825, + 0.907, + 0.903 + ], + "angle": 0, + "content": "where \\(N\\) is the batch size, \\(n\\) is the image index, \\(\\mathrm{sim}(\\cdot ,\\cdot)\\) is a cosine similarity function, and \\(\\phi (\\cdot)\\) is an MLP that projects decoder feature \\(f^{\\mathrm{dec},l}\\) to match the channel dimension of \\(f^{\\mathrm{DINO}}\\). When training VQ tokenizers, we add the semantic regularization to the original VQGAN [15, 50] objectives:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.091, + 0.875, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Throughout this work, we use downsample ratio \\( p = 16 \\), codebook dimension \\( D = 8 \\), and codebook size 16384 by default." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.096, + 0.09, + 0.482, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.19, + 0.483, + 0.247 + ], + "angle": 0, + "content": "Figure 5. Training curves for 2.9B XL-XXL tokenizers with and without entropy loss. A 2.9B tokenizer does not converge without entropy loss. The entropy loss encourages high codebook usage and stabilizes training loss." + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.283, + 0.483, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\mathrm {v q g a n}} + \\lambda \\mathcal {L} _ {\\text {r e g}}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.307, + 0.484, + 0.385 + ], + "angle": 0, + "content": "and we empirically set \\(\\lambda = 0.5\\) in this work. 
Here \\(\\mathcal{L}_{\\mathrm{vqgan}}\\) is a combination of multiple losses, including \\(\\mathcal{L}_{\\mathrm{recon}}\\), the \\(l_{2}\\) reconstruction loss on image pixels, \\(\\mathcal{L}_{\\mathrm{percp}}\\), the perceptual loss [27, 74], \\(\\mathcal{L}_{\\mathrm{GAN}}\\), PatchGAN [26] adversarial loss, and \\(\\mathcal{L}_{\\mathrm{VQ}}\\) [15, 66] the VQ codebook loss." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.392, + 0.449, + 0.409 + ], + "angle": 0, + "content": "4.3. Entropy Loss for Billion-Level Tokenizers" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.415, + 0.484, + 0.52 + ], + "angle": 0, + "content": "When training a 2.9B tokenizer, we find that using the same training recipe as the 622M tokenizer leads to convergence failure for both perceptual loss and reconstruction loss, and consistently low codebook usage. We hypothesize that low codebook usage accounts for the convergence difficulty. To address this, we incorporate entropy penalty [67, 69] to encourage higher codebook utilization:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.533, + 0.483, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {e n t r o p y}} = \\mathbb {E} _ {\\mathbf {z}} [ H (\\hat {\\mathbf {z}} | \\mathbf {z}) ] - H (\\hat {\\mathbf {z}}) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.561, + 0.484, + 0.729 + ], + "angle": 0, + "content": "where \\( H(\\cdot) \\) denotes the Shannon entropy, \\( \\mathbf{z} \\in \\mathbb{R}^D \\) is the input for quantizer to be quantized to \\( \\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D \\) and \\( \\mathbf{c}_i \\) is the \\( i \\)-th codebook vector. \\( \\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}}|\\mathbf{z})] \\) penalizes the uncertainty in quantization to reduce quantization error, and \\( -H(\\hat{\\mathbf{z}}) \\) encourages the codebook vectors to be selected more uniformly across the entire codebook. 
The detailed derivation can be found in our supp. We find that the entropy penalty addresses the convergence difficulty of large tokenizers. As shown in Fig. 5, introducing entropy loss to the 2.9B tokenizer enables the codebook usage to quickly reach a high level, and the loss converges properly3." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.742, + 0.224, + 0.76 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.767, + 0.191, + 0.784 + ], + "angle": 0, + "content": "5.1. Settings" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.79, + 0.484, + 0.867 + ], + "angle": 0, + "content": "For scaling up visual tokenizers, we follow the architecture configurations for the Transformers in GigaTok tokenizers as summarized in Tab. 1. We evaluate the tokenizers from three perspectives: reconstruction, downstream AR generation, and downstream AR representation quality. We use" + }, + { + "type": "page_footnote", + "bbox": [ + 0.09, + 0.876, + 0.483, + 0.902 + ], + "angle": 0, + "content": "We take perceptual loss as an example, and reconstruction loss shows a similar pattern" + }, + { + "type": "table", + "bbox": [ + 0.53, + 0.089, + 0.895, + 0.237 + ], + "angle": 0, + "content": "
TypeEnc./Dec.Params.BlocksHeadsDim.
1D Tok.S26M68512
B115M1212768
L405M24161024
XL948M36201280
XXL1870M48241536
2D Tok.S19M68512
B86M1212768
L329M24161024
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.247, + 0.905, + 0.289 + ], + "angle": 0, + "content": "Table 1. Architectures of the transformer variants for tokenizer encoder/decoder parts in our experiments. We use Q-Former [6, 34] for 1D tokenizers and ViT [13] for 2D tokenizers." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.305, + 0.706, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.306, + 0.902, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.381, + 0.906, + 0.478 + ], + "angle": 0, + "content": "Figure 6. Correlation between AR Probing Performance and Larger AR models. For 3 tokenizers: S-S, S-L, and B-L, we present that as the tokenizer improves, the performance improvements of AR Probing correlate to the performance improvements of larger AR models. Therefore, the AR Probing can effectively indicate how the tokenizer affects downstream larger AR models with limited computational costs." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.501, + 0.906, + 0.682 + ], + "angle": 0, + "content": "rFID and LPIPS [74] to evaluate reconstruction fidelity, gFID to evaluate generation performance, and linear probing to evaluate the representation quality of the downstream AR model. Our downstream AR models are LlamaGen [50] with 1D absolute positional embedding. Our scaling experiments (Sec. 5.2) and ablation study (Sec. 5.3) use AR Probing (111M AR model described in Sec.3.1) validation loss, gFID, and linear probing to reflect the learnability of tokens, generation performance, and representation quality, respectively. While in the system-level comparison (Sec. 5.4), we train larger 1.4B AR models for comparison with previous work. More details are in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.683, + 0.906, + 0.759 + ], + "angle": 0, + "content": "Effectiveness of AR Probing. As shown in Fig. 
6, AR Probing performances including gFID and linear probing accuracy align with the larger LlamaGen-XL [50] model results. Therefore, we use AR Probing throughout the following experiments except for the system-level comparison." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.771, + 0.838, + 0.788 + ], + "angle": 0, + "content": "5.2. Scaling with Semantic Regularization" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.905, + 0.839 + ], + "angle": 0, + "content": "We demonstrate that our proposed semantic regularization resolves the reconstruction vs. generation dilemma in scaling tokenizers." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Model scaling with semantic regularization. Results are shown in Fig. 7. (1) Semantic regularization improves the reconstruction fidelity, indicated by lower rFID. (2) More importantly, the AR Probing validation loss and gFID de" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.295, + 0.095, + 0.699, + 0.109 + ], + "angle": 0, + "content": "w/o semantic regularization w/ semantic regularization" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.112, + 0.233, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.112, + 0.393, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.112, + 0.585, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.112, + 0.73, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.112, + 0.902, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.244, + 0.907, + 0.314 + ], + "angle": 0, + "content": "Figure 7. Scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. 
By semantic regularization, GigaTok resolves the reconstruction vs. generation dilemma for tokenizer scaling in contrast to the vanilla version without semantic regularization. Moreover, GigaTok consistently improves the representation quality of downstream AR models by scaling up visual tokenizers. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.332, + 0.482, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.404, + 0.484, + 0.515 + ], + "angle": 0, + "content": "Figure 8. Visualization of tokenizer features with and without semantic regularization. We compute PCA among the tokenizer features of a group of images of the same \"golden retriever\" class and visualize the first 3 PCA components. We observe that the latent space of vanilla tokenizers shows inconsistent features both within a single image or across multiple semantically similar images. In contrast, GigaTok encodes images with semantic consistency and thus reduces the latent space complexity for AR models." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.535, + 0.483, + 0.64 + ], + "angle": 0, + "content": "grades for larger tokenizers without semantic regularization, showing the reconstruction vs. generation dilemma. The dilemma is addressed with semantic regularization, evidenced by the relatively constrained validation loss and consistently decreasing gFID. (3) The Linear Probing results show that semantic regularization helps AR models to learn better representations as the tokenizer model scales up." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.641, + 0.483, + 0.776 + ], + "angle": 0, + "content": "Visualization for the tokenizer feature space. We visualize the first 3 PCA components of the tokenizer features from the first Transformer decoder layer for a group of images. As shown in Fig. 
8, we find the vanilla tokenizer encodes a latent space with limited semantic consistency, which potentially impairs its learnability for downstream AR models. In contrast, GigaTok presents semantically consistent patterns (Fig. 8), indicating a meaningful and consistent latent space." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.785, + 0.458, + 0.8 + ], + "angle": 0, + "content": "5.3. Asymmetric 1D Tokenizer is More Scalable" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.807, + 0.483, + 0.868 + ], + "angle": 0, + "content": "Tokenizer decoder deserves more parameters. To determine whether the decoder or encoder should be prioritized when scaling up, we compare S-B\\(^4\\) and B-S tokenizers in Tab. 2, both trained under the same setting for 100" + }, + { + "type": "table", + "bbox": [ + 0.515, + 0.332, + 0.915, + 0.43 + ], + "angle": 0, + "content": "
Enc./Dec. SizerFID↓LPIPS↓gFID↓Lin Acc.↑
B-S0.980.2216.5664.5
S-B0.940.2145.6559.8
S-L0.830.2065.1960.6
B-L0.810.2064.8266.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.441, + 0.907, + 0.497 + ], + "angle": 0, + "content": "Table 2. The results for scaling encoder/decoder. Prioritizing the scaling of decoders benefits downstream generation more than scaling encoders (S-B v.s. B-S). But scaling encoders can still bring significant improvements (S-L v.s. B-L)." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.508, + 0.9, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.628, + 0.907, + 0.699 + ], + "angle": 0, + "content": "Figure 9. Scalability comparison for 1D and 2D tokenizers. Using the same training setting, 1D tokenizers shows better reconstruction (rFID) and downstream representation quality (AR Probing: Lin Acc.). For downstream generation (gFID), 1D tokenizers present a steeper improving trend than 2D tokenizers." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.719, + 0.905, + 0.78 + ], + "angle": 0, + "content": "epochs. Our results show that scaling decoders, rather than encoders, leads to greater improvements in both reconstruction and downstream generation, suggesting that decoder scaling should be prioritized." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.781, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Scaling tokenizer encoder is also important. While prioritizing the scaling of tokenizer decoders yields significant benefits, we also find that scaling tokenizer encoders can further enhance downstream models. In Tab. 2, we show that a B-L tokenizer gains significant improvements compared to an S-L tokenizer. Therefore, we recommend scaling both encoders and decoders while maintaining a larger decoder than the encoder for optimal performance." + }, + { + "type": "page_footnote", + "bbox": [ + 0.091, + 0.875, + 0.483, + 0.899 + ], + "angle": 0, + "content": "X-Y tokenizer denotes X-sized encoder and Y-sized decoder. 
For example, S-B indicates Small encoder-Base decoder structure" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.104, + 0.089, + 0.896, + 0.589 + ], + "angle": 0, + "content": "
TokenizerTok. Type/Param.#TokensrFID↓Generator Model/Param.TypegFID↓Acc.↑
Continuous token modeling
VAE [47]KL†55M40960.27LDM-4 [47]400MDiff.3.60-
DiT-XL/2 [44]675MDiff.2.27-
SD-VAE [1]KL†84M10240.62SiT-XL/2 [42]675MDiff.2.06-
SiT-XL/2 + REPA [71]675MDiff.1.4274.6
VA-VAE [65]KL70M2560.28LightningDiT [65]675MDiff.1.35-
VAE [35]KL66M2560.53MAR-H [35]943MAR+Diff.1.5560.0◇
Discrete token modeling
VQGAN [8]VQ66M2562.28MaskGIT [8]227MMask.6.18☆-
TiTok-S [70]VQ72M1281.71MaskGIT-UViT-L [4, 8]287MMask.1.97-
TiTok-L [70]VQ641M322.21MaskGIT-ViT [8]177MMask.2.77-
B-AE-d32 [22]LFQ66M2561.69BiGR-XXL-d32 [22]1.5BAR+Diff2.36-
BiGR-XL-d32 [22]799MAR+Diff-69.8
VAR-Tok. [53]MSRQ†109M6801.00‡VAR-d24 [53]1.0BVAR2.09-
VAR-d30 [53]2.0BVAR1.92-
ImageFolder [36]MSRQ176M2860.80‡ImageFolder-VAR [36]362MVAR2.60-
VQGAN [15]VQ23M2564.98Taming-Tran. [15]1.4BAR15.78☆-
ViT-VQGAN [66]VQ64M10241.28VIM-Large [66]1.7BAR4.17☆-
RQ-VAE [33]RQ66M2563.20RQTran. [33]3.8BAR7.55☆-
Open-MAGVIT2 [40]LFQ133M2561.17Open-MAGVIT2-XL [40]1.5BAR2.53-
IBQ [49]IBQ128M2561.37IBQ-XXL [49]2.1BAR2.05-
LlamaGen-Tok. [50]VQ72M2562.19LlamaGen-L [50]343MAR3.8140.5◇
LlamaGen-XXL [50]1.4BAR3.09-
LlamaGen-Tok. [50]VQ72M5760.94LlamaGen-XXL [50]1.4BAR2.34-
GigaTok-B-LVQ622M2560.51‡LlamaGen-B (1d) [50]111MAR3.3367.7
GigaTok-S-SVQ136M2561.01LlamaGen-B (1d) [50]111MAR4.0562.6
GigaTok-S-BVQ232M2560.89LlamaGen-B (1d) [50]111MAR3.8362.9
GigaTok-B-LVQ622M2560.81LlamaGen-B (1d) [50]111MAR3.2667.6
LlamaGen-XXL (1d) [50]1.4BAR2.03☆69.4
GigaTok-XL-XXLVQ2.9B2560.79LlamaGen-B (1d) [50]111MAR3.1572.0
LlamaGen-XXL (1d) [50]1.4BAR1.98☆74.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.598, + 0.908, + 0.642 + ], + "angle": 0, + "content": "Table 3. System-level comparison for tokenizers and downstream generation models on ImageNet \\(256 \\times 256\\). For gFID, we present the lowest value between w/ or w/o CFG scenarios. †: Training set includes data besides ImageNet. ‡: Using frozen DINO [7] for discriminator, which largely improves rFID. ☆: Without classifier-free-guidance. ◇: Data from BiGR [22]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.66, + 0.483, + 0.796 + ], + "angle": 0, + "content": "1D tokenizers are more scalable than 2D tokenizers. We train S-S, S-B and B-L 1D/2D tokenizers with the same setting with semantic regularization. As shown in Fig. 9, 1D tokenizers consistently achieve better rFID and AR Probing linear probing accuracy than 2D tokenizers. For AR Probing gFID, the 1D tokenizers exhibit a steeper scaling trend, eventually surpassing 2D tokenizers as the model scales. We attribute the superior scalability of 1D tokenizers to the reduced inductive bias." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.803, + 0.325, + 0.82 + ], + "angle": 0, + "content": "5.4. System-level Comparison" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Experiment Settings. Using GigaTok for tokenization, we scale the training of LlamaGen [50] AR models on \\(256 \\times 256\\) ImageNet training set for 300 epochs to compare with other methods. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We provide" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.66, + 0.907, + 0.738 + ], + "angle": 0, + "content": "the results of a B-L tokenizer trained with DINO discriminator [36, 53] to fairly compare rFID. But in practice we find DINO discriminator provides limited improvement for LPIPS and may affect the training stability of billion-scale tokenizers. 
Therefore, we exclude it from our main design." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Results. As shown in Tab. 3, our 2.9B GigaTok achieves state-of-the-art reconstruction performance (rIFD) among all discrete tokenizers. Furthermore, with our 2.9B tokenizer, the downstream 1.4B AR model achieves state-of-the-art image generation performance (gFID) among LLM-style autoregressive next-token-prediction models. VAR [53] predicts images with next-scale prediction rather than next-token-prediction, which is less compatible with language models. Our model achieves comparable gFID to VAR [53] with a simple LLM-style downstream AR genera" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.107, + 0.089, + 0.472, + 0.166 + ], + "angle": 0, + "content": "
Decoder\\AR Model SizeBLXXL
B3.7%2.3%1.3%
L11.2%7.0%3.4%
XXL32.4%20.3%9.9%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.177, + 0.483, + 0.232 + ], + "angle": 0, + "content": "Table 4. Ratio of time consumptions for tokenizer decoding during image generation. When we use a 2.9B XLXXL tokenizer for a 1.4B LlamaGen-XXL AR model, the tokenizer decoding only takes \\(9.9\\%\\) of the total inference time." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.256, + 0.484, + 0.378 + ], + "angle": 0, + "content": "tor without incorporating vision-specific designs like VAR. Moreover, this 1.4B AR model trained on the 2.9B tokenizer achieves state-of-the-art linear probing accuracy via visual generative pretraining5. This indicates that our GigaTok helps the downstream generation model to learn better representations. The high-quality representation learned from generative pre-training may also help unify generation and understanding for future native multimodal models." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.385, + 0.363, + 0.401 + ], + "angle": 0, + "content": "5.5. Discussion and Ablation Study" + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.418, + 0.487, + 0.495 + ], + "angle": 0, + "content": "
Align. Layer lrFID↓LPIPS↓gFID↓Lin Acc.↑
21.060.2246.2663.4
31.010.2236.1061.9
41.070.2236.0758.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.506, + 0.484, + 0.562 + ], + "angle": 0, + "content": "Table 5. Layer \\( l \\) for semantic regularization (S-S tokenizer). Smaller \\( l \\) brings better downstream AR model representations but can sacrifice reconstruction and downstream generation quality. We choose \\( l = 3 \\) by default for more balanced performance." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.588, + 0.498, + 0.665 + ], + "angle": 0, + "content": "
Sem. Enc.rFID↓LPIPS↓gFID↓Lin Acc.↑
CLIP [16, 46]0.910.2106.3561.4
SigLIP [72]0.920.2106.2056.7
DINOv2-B [43]0.850.2125.5564.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.718 + ], + "angle": 0, + "content": "Table 6. Ablation study for the choice of pretrained semantic encoders (S-B tokenizer). DINOv2-B delivers the best performance among all models." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.735, + 0.483, + 0.856 + ], + "angle": 0, + "content": "Discussion on generation costs. When generating an image, AR models take multiple passes to predict tokens, while tokenizers only need one forward pass. Therefore, the time consumption for decoding tokens to images is relatively small compared to AR models. We record the ratio of time spent on tokenizer decoding for different tokenizer/AR models in Tab. 4. For a 1.4B AR model, our largest 2.9B tokenizer takes only \\(\\sim 10\\%\\) of the total inference time." + }, + { + "type": "page_footnote", + "bbox": [ + 0.09, + 0.863, + 0.483, + 0.902 + ], + "angle": 0, + "content": "REPA [71] achieves better representation by directly distilling pretrained representations to the generation model, which is not a fair comparison with ours as we do not leverage the supervision for AR training." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.089, + 0.905, + 0.182 + ], + "angle": 0, + "content": "
Sem. Reg. λrFID↓LPIPS↓gFID↓Lin Acc.↑
0.251.280.2266.2757.0
0.501.220.2286.3958.6
0.751.270.2366.2958.6
1.001.380.2396.2762.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.192, + 0.907, + 0.248 + ], + "angle": 0, + "content": "Table 7. Ablation Study for the semantic regularization weight (S-S tokenizer). A strong semantic regularization weight leads to worse reconstruction but better downstream representation. We choose \\(\\lambda = 0.5\\) by default for more balanced performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.274, + 0.907, + 0.427 + ], + "angle": 0, + "content": "Searching the best layer for semantic regularization. We search \\( l \\), the layer's index in the Transformer decoder before intermediate features are extracted to calculate semantic regularization in Eq. 1. As shown in Tab. 5, varying \\( l \\) presents a trade-off between gFID and the Lin Acc. for AR Probing. Smaller \\( l \\) means stricter regularization for the latent space so that the downstream generation models learn better representation. However, smaller \\( l \\) also sacrifices generation quality. We choose \\( l = 3 \\) for a more balanced rFID, gFID, and linear probing accuracy for all tokenizers." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.429, + 0.909, + 0.537 + ], + "angle": 0, + "content": "Exploring pretrained semantic encoder choices. We compare CLIP (DFN) [16, 46], SigLIP-400M [72] and DINOv2-B [43] as the source of semantic regularization for S-B tokenizers. As shown in Tab. 6, utilizing DINOv2-B as the semantic encoder for regularization produces the best tokenizer for reconstruction, downstream class conditional generation and representation quality." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.538, + 0.909, + 0.645 + ], + "angle": 0, + "content": "Exploring weights for semantic regularization. We study the effects of different regularization weights \\(\\lambda\\) (Eq. 2), from 0.25 to 1.00. As shown in Tab. 
7, a large \\(\\lambda\\) (0.75, 1.00) will damage the reconstruction quality but benefits the linear probing accuracy, whereas smaller \\(\\lambda\\) (0.25) results in suboptimal rFID and linear probing accuracy. We choose the more balanced \\(\\lambda = 0.5\\) as a default for all tokenizers." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.658, + 0.634, + 0.673 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.683, + 0.909, + 0.865 + ], + "angle": 0, + "content": "In this work, we study and address the reconstruction vs. generation dilemma for scaling visual tokenizers. We identify that the dilemma stems from increasing latent space complexity in larger tokenizers. We propose semantic regularization to effectively regularize the tokenizer latent space by injecting pre-trained representations to align with tokenizer features in training. The semantic regularization, together with several key practices we explored, lead to the first 3B tokenizer, GigaTok, that achieves state-of-the-art reconstruction, downstream AR generation, and downstream AR representation quality. Please refer to discussions on limitations and future work in supplementary materials." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.251, + 0.108 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.482, + 0.145 + ], + "angle": 0, + "content": "This work is partially supported by the National Nature Science Foundation of China (No. 62402406)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.146, + 0.482, + 0.192 + ], + "angle": 0, + "content": "The authors also sincerely thank Qihang Yu and Liang-Chieh Chen for their valuable discussions during the development of GigaTok." 
+ }, + { + "type": "title", + "bbox": [ + 0.093, + 0.203, + 0.188, + 0.219 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.228, + 0.482, + 0.256 + ], + "angle": 0, + "content": "[1] stabilityyai/sd-vae-ft-ema. https://huggingface.co/stabilityyai/sd-vae-ft-ema, 2023.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.257, + 0.483, + 0.326 + ], + "angle": 0, + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.328, + 0.483, + 0.396 + ], + "angle": 0, + "content": "[3] Roman Bachmann, Jesse Allardice, David Mizrahi, Enrico Fini, Oğuzhan Fatih Kar, Elmira Amirloo, Alaaeldin El-Nouby, Amir Zamir, and Afshin Dehghan. Flextok: Resampling images into 1d token sequences of flexible length. arXiv preprint arXiv:2502.13967, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.398, + 0.483, + 0.466 + ], + "angle": 0, + "content": "[4] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22669-22679, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.468, + 0.483, + 0.535 + ], + "angle": 0, + "content": "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.537, + 0.483, + 0.605 + ], + "angle": 0, + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 
End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 4, 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.607, + 0.483, + 0.676 + ], + "angle": 0, + "content": "[7] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 2, 7, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.677, + 0.483, + 0.745 + ], + "angle": 0, + "content": "[8] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.747, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[9] Hao Chen, Ze Wang, Xiang Li, Xineng Sun, Fangyi Chen, Jiang Liu, Jindong Wang, Bhiksha Raj, Zicheng Liu, and Emad Barsoum. Softvq-vae: Efficient 1-dimensional continuous tokenizer. arXiv preprint arXiv:2412.10958, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.817, + 0.483, + 0.873 + ], + "angle": 0, + "content": "[10] Hao Chen, Yujin Han, Fangyi Chen, Xiang Li, Yidong Wang, Jindong Wang, Ze Wang, Zicheng Liu, Difan Zou, and Bhiksha Raj. Masked autoencoders are effective tokenizers for diffusion models. arXiv preprint arXiv:2502.03444, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.874, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[11] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. 
Generative pre" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.228, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.12 + ], + "angle": 0, + "content": "training from pixels. In International conference on machine learning, pages 1691-1703. PMLR, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[12] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.192, + 0.905, + 0.234 + ], + "angle": 0, + "content": "[13] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.236, + 0.905, + 0.303 + ], + "angle": 0, + "content": "[14] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.305, + 0.905, + 0.374 + ], + "angle": 0, + "content": "[15] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 1, 2, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.377, + 0.905, + 0.419 + ], + "angle": 0, + "content": "[16] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.42, + 0.905, + 0.476 + ], + "angle": 0, + "content": "[17] Christopher Fifty, Ronald G Junkins, Dennis Duan, Aniketh Iger, Jerry W Liu, Ehsan Amid, Sebastian Thrun, and Christopher Ré. Restructuring vector quantization with the rotation trick. arXiv preprint arXiv:2410.06424, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.477, + 0.904, + 0.518 + ], + "angle": 0, + "content": "[18] Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.52, + 0.905, + 0.588 + ], + "angle": 0, + "content": "[19] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.591, + 0.905, + 0.646 + ], + "angle": 0, + "content": "[20] Alexander Hagiéle, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.648, + 0.905, + 0.716 + ], + "angle": 0, + "content": "[21] Philippe Hansen-Estruch, David Yan, Ching-Yao Chung, Orr Zohar, Jialiang Wang, Tingbo Hou, Tao Xu, Sriram Vishwanath, Peter Vajda, and Xinlei Chen. Learnings from scaling visual tokenizers for reconstruction and generation. arXiv preprint arXiv:2501.09755, 2025. 1, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.718, + 0.905, + 0.786 + ], + "angle": 0, + "content": "[22] Shaozhe Hao, Xuantong Liu, Xianbiao Qi, Shihao Zhao, Bojia Zi, Rong Xiao, Kai Han, and Kwan-Yee K Wong. 
Bigrr: Harnessing binary latent codes for image generation and improved visual representation capabilities. arXiv preprint arXiv:2410.14672, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.789, + 0.905, + 0.857 + ], + "angle": 0, + "content": "[23] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.859, + 0.905, + 0.902 + ], + "angle": 0, + "content": "[24] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilib-" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.902 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.126, + 0.093, + 0.482, + 0.119 + ], + "angle": 0, + "content": "rium. Advances in neural information processing systems, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.191 + ], + "angle": 0, + "content": "[25] Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.193, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[26] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1125-1134, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.264, + 0.482, + 0.333 + ], + "angle": 0, + "content": "[27] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. 
Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.334, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[28] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.364, + 0.482, + 0.405 + ], + "angle": 0, + "content": "[29] Diederik P Kingma, Max Welling, et al. An introduction to variational autoencoders. Foundations and Trends® in Machine Learning, 12(4):307-392, 2019. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.407, + 0.482, + 0.571 + ], + "angle": 0, + "content": "[30] Dan Kondratyuk, Lijun Yu, Xiuye Gu, Jose Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Joshua V. Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation. In Proceedings of the 41st International Conference on Machine Learning, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.575, + 0.482, + 0.642 + ], + "angle": 0, + "content": "[31] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.645, + 0.482, + 0.686 + ], + "angle": 0, + "content": "[32] Yann LeCun, Yoshua Bengio, et al. Convolutional networks for images, speech, and time series. 
The handbook of brain theory and neural networks, 3361(10):1995, 1995. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.688, + 0.482, + 0.757 + ], + "angle": 0, + "content": "[33] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11523-11532, 2022. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.759, + 0.482, + 0.828 + ], + "angle": 0, + "content": "[34] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730-19742. PMLR, 2023. 4, 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.83, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[35] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[36] Xiang Li, Kai Qiu, Hao Chen, Jason Kuen, Jiquiang Gu, Bhiksha Raj, and Zhe Lin. Imagefolder: Autoregres" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.093, + 0.905, + 0.119 + ], + "angle": 0, + "content": "sive image generation with folded tokens. arXiv preprint arXiv:2410.01756, 2024. 2, 3, 7, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.177 + ], + "angle": 0, + "content": "[37] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.179, + 0.905, + 0.247 + ], + "angle": 0, + "content": "[38] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.29 + ], + "angle": 0, + "content": "[39] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.293, + 0.905, + 0.348 + ], + "angle": 0, + "content": "[40] Zhuoyan Luo, Fengyuan Shi, Yixiao Ge, Yujiu Yang, Limin Wang, and Ying Shan. Open-magvit2: An open-source project toward democratizing auto-regressive visual generation. arXiv preprint arXiv:2409.04410, 2024. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.35, + 0.905, + 0.405 + ], + "angle": 0, + "content": "[41] Chuofan Ma, Yi Jiang, Junfeng Wu, Jihan Yang, Xin Yu, Zehuan Yuan, Bingyue Peng, and Xiaojuan Qi. Unitok: A unified tokenizer for visual generation and understanding. arXiv preprint arXiv:2502.20321, 2025. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.407, + 0.905, + 0.476 + ], + "angle": 0, + "content": "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.478, + 0.905, + 0.547 + ], + "angle": 0, + "content": "[43] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. 
Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 2, 4, 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.549, + 0.905, + 0.603 + ], + "angle": 0, + "content": "[44] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.606, + 0.905, + 0.673 + ], + "angle": 0, + "content": "[45] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.676, + 0.905, + 0.757 + ], + "angle": 0, + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.76, + 0.907, + 0.83 + ], + "angle": 0, + "content": "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 7, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.907, + 0.901 + ], + "angle": 0, + "content": "[48] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 
3" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[49] Fengyuan Shi, Zhuoyan Luo, Yixiao Ge, Yujiu Yang, Ying Shan, and Limin Wang. Taming scalable visual tokenizer for autoregressive image generation. arXiv preprint arXiv:2412.02692, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.203 + ], + "angle": 0, + "content": "[50] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. 2024. 1, 2, 3, 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.205, + 0.482, + 0.272 + ], + "angle": 0, + "content": "[51] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.274, + 0.482, + 0.342 + ], + "angle": 0, + "content": "[52] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.343, + 0.482, + 0.412 + ], + "angle": 0, + "content": "[53] Keyu Tian, Yi Jiang, Zehuan Yuan, BINGYUE PENG, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 
2, 7, 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.414, + 0.482, + 0.481 + ], + "angle": 0, + "content": "[54] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.483, + 0.482, + 0.55 + ], + "angle": 0, + "content": "[55] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.552, + 0.482, + 0.593 + ], + "angle": 0, + "content": "[56] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.595, + 0.482, + 0.621 + ], + "angle": 0, + "content": "[57] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.623, + 0.482, + 0.663 + ], + "angle": 0, + "content": "[58] Hanyu Wang, Saksham Suri, Yixuan Ren, Hao Chen, and Abhinav Shrivastava. Larp: Tokenizing videos with a learned autoregressive generative prior. In ICLR, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.665, + 0.482, + 0.718 + ], + "angle": 0, + "content": "[59] Luting Wang, Yang Zhao, Zijian Zhang, Jiashi Feng, Si Liu, and Bingyi Kang. Image understanding makes for a good tokenizer for image generation. arXiv preprint arXiv:2411.04406, 2024. 
3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.72, + 0.482, + 0.775 + ], + "angle": 0, + "content": "[60] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.777, + 0.482, + 0.83 + ], + "angle": 0, + "content": "[61] Mark Weber, Lijun Yu, Qihang Yu, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. Maskbit: Embedding-free image generation via bit tokens. arXiv preprint arXiv:2409.16211, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.832, + 0.482, + 0.9 + ], + "angle": 0, + "content": "[62] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024. 1" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.906, + 0.16 + ], + "angle": 0, + "content": "[63] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.163, + 0.906, + 0.217 + ], + "angle": 0, + "content": "[64] Wanghan Xu, Xiaoyu Yue, Zidong Wang, Yao Teng, Wenlong Zhang, Xihui Liu, Luping Zhou, Wanli Ouyang, and Lei Bai. Exploring representation-aligned latent space for better generation. arXiv preprint arXiv:2502.00359, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.219, + 0.906, + 0.259 + ], + "angle": 0, + "content": "[65] Jingfeng Yao and Xinggang Wang. 
Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.261, + 0.906, + 0.327 + ], + "angle": 0, + "content": "[66] Jiahui Yu, Xin Li, Jing Yu Koh, Han Zhang, Ruoming Pang, James Qin, Alexander Ku, Yuanzhong Xu, Jason Baldridge, and Yonghui Wu. Vector-quantized image modeling with improved vqgan. arXiv preprint arXiv:2110.04627, 2021. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.33, + 0.906, + 0.412 + ], + "angle": 0, + "content": "[67] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10459-10469, 2023. 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.414, + 0.906, + 0.495 + ], + "angle": 0, + "content": "[68] Lijun Yu, Yong Cheng, Zhiruo Wang, Vivek Kumar, Wolfgang Macherey, Yanping Huang, David Ross, Irfan Essa, Yonatan Bisk, Ming-Hsuan Yang, et al. Spae: Semantic pyramid autoencoder for multimodal generation with frozen llms. Advances in Neural Information Processing Systems, 36:52692-52704, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.497, + 0.906, + 0.566 + ], + "angle": 0, + "content": "[69] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 1, 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.567, + 0.906, + 0.62 + ], + "angle": 0, + "content": "[70] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. 
arXiv preprint arXiv:2406.07550, 2024. 2, 7, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.623, + 0.906, + 0.69 + ], + "angle": 0, + "content": "[71] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 3, 7, 8, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.692, + 0.906, + 0.747 + ], + "angle": 0, + "content": "[72] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.749, + 0.906, + 0.829 + ], + "angle": 0, + "content": "[73] Baoquan Zhang, Huaibin Wang, Chuyao Luo, Xutao Li, Guotao Liang, Yunming Ye, Xiaochen Qi, and Yao He. Codebook transfer with part-of-speech for vector-quantized image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7757–7766, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.906, + 0.9 + ], + "angle": 0, + "content": "[74] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.906, + 0.9 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.092, + 0.482, + 0.134 + ], + "angle": 0, + "content": "[75] Yue Zhao, Yuanjun Xiong, and Philipp Krajhenbuhl. Image and video tokenization with binary spherical quantization. arXiv preprint arXiv:2406.07548, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.136, + 0.483, + 0.177 + ], + "angle": 0, + "content": "[76] Lei Zhu, Fangyun Wei, Yanye Lu, and Dong Chen. Scaling the codebook size of vqgan to 100,000 with a utilization rate of \\(99\\%\\). arXiv preprint arXiv:2406.11837, 2024. 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.179, + 0.483, + 0.233 + ], + "angle": 0, + "content": "[77] Yongxin Zhu, Bocheng Li, Hang Zhang, Xin Li, Linli Xu, and Lidong Bing. Stabilize the latent space for image autoregressive modeling: A unified perspective. arXiv preprint arXiv:2410.12490, 2024. 3" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.092, + 0.483, + 0.233 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.2, + 0.086, + 0.8, + 0.131 + ], + "angle": 0, + "content": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.142, + 0.615, + 0.163 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.179, + 0.368, + 0.195 + ], + "angle": 0, + "content": "A. Limitations and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.205, + 0.484, + 0.402 + ], + "angle": 0, + "content": "This study primarily focuses on scaling tokenizers for class-conditional image generation. While we have demonstrated the effectiveness of GigaTok for downstream class-conditional generation, expanding the scope to include text-conditional image generation or video generation remains an open avenue for future work. Additionally, unlike CNN-based 2D tokenizers, 1D Transformer-based tokenizers are not directly applicable to multiple resolutions without additional training adjustments. This challenge presents an important direction for further exploration. 
Besides scaling the model sizes of tokenizers, the effect of scaling training data, codebook dimension and codebook size for downstream autoregressive generation are left for future research." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.417, + 0.371, + 0.433 + ], + "angle": 0, + "content": "B. Configurations for AR models" + }, + { + "type": "table", + "bbox": [ + 0.149, + 0.455, + 0.428, + 0.542 + ], + "angle": 0, + "content": "
SizeParams.BlocksHeadsDim.
B111M1212768
L343M24161024
XL775M36201280
XXL1.4B48241536
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.553, + 0.483, + 0.58 + ], + "angle": 0, + "content": "Table 8. Architectures of the LLamaGen models in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.483, + 0.749 + ], + "angle": 0, + "content": "AR model training. We scale up the training of downstream Llama-style [50, 54] AR models to compare generation performance with other models. For model training, we use WSD learning rate scheduler [20, 25] with \\(1 \\times 10^{-4}\\) base learning rate, 0.2 decay ratio and 1 epoch warm-up. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We use a batch size of 256 for training the B, L and XL models and a 512 batch size for training the XXL model. Our AR models are trained for 300 epochs on the \\(256 \\times 256\\) ImageNet training set." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.902 + ], + "angle": 0, + "content": "CFG for gFID. Since gFID of GPT models can be largely affected by classifier free guidance (CFG) [47, 50] and often has an optimal CFG [50], for fair comparison, we search the optimal CFG using zero-order search with a step of 0.25 and use the lowest gFID as the final value. For AR Probing, we use constant CFG scheduling for simplicity. For system-level comparison, we use a step function for CFG scheduling inspired by [31]. Specifically, the AR models predict the first \\(18\\%\\) tokens without CFG, i.e., \\(\\mathrm{CFG} = 1\\) for better diversity, and use CFG for the remaining tokens" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.179, + 0.907, + 0.395 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.535, + 0.408, + 0.884, + 0.423 + ], + "angle": 0, + "content": "Figure 10. The architecture of GigaTok with Q-Former." 
+ }, + { + "type": "image", + "bbox": [ + 0.517, + 0.436, + 0.905, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.518, + 0.564, + 0.9, + 0.579 + ], + "angle": 0, + "content": "Figure 11. Initialization of 1D queries in Q-Former modules." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.606, + 0.906, + 0.636 + ], + "angle": 0, + "content": "for better visual quality. Interestingly, we find that the 1.4B LlamaGen model achieves the best gFID without CFG." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.649, + 0.829, + 0.667 + ], + "angle": 0, + "content": "C. Detailed GigaTok Implementation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.675, + 0.784, + 0.689 + ], + "angle": 0, + "content": "Please refer to Tab. 9 for training details." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.906, + 0.884 + ], + "angle": 0, + "content": "Q-Fomrer in GigaTok. GigaTok utilizes Q-Former [6, 34] to build 1D tokenizers, as shown in Fig. 10. For Q-Former encoder in GigaTok, we initialize the 1D queries initialized from the 2D input features of the CNN encoder using a multi-level average pooling strategy, as shown in Fig. 11. Specifically, for the same 2D input features, we spatially divide them with different granularity at different levels, and perform average pooling for every divided region at each level. The pooled features are flattened and concatenated from level 0 to the last level. Therefore, a 1D token sequence with \\(2^{L}\\) length can be initialized with \\(L\\) levels from 2D input features. At the decoding stage, the 2D queries are all initialized from the first 1D latent feature." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.886, + 0.905, + 0.902 + ], + "angle": 0, + "content": "Entropy Loss for VQ Tokenizers. While entropy loss [67," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.264, + 0.089, + 0.737, + 0.438 + ], + "angle": 0, + "content": "
ConfigurationS-SS-BS-LB-LXL-XXL
Q-Former Encoder depth6661236
Q-Former Encoder heads8881220
Q-Former Encoder dim.5125125127681280
Q-Former Decoder depth612242448
Q-Former Decoder heads.812161624
Q-Former Decoder dim.512768102410241536
Params (M)1362325336222896
Codebook size16384
Codebook dimension8
#Tokens256
Training epochs100200200200300
Batch size128128256256256
Alignment Layer l3
Learning rate scheduleCosine Decay
Base learning rate\\( 1 \\times 10^{-4} \\)
Minimum learning rate\\( 1 \\times 10^{-5} \\)
LR warm-up iterations00005000
OptimizerAdamW[39]
Opt. momentum\\( \\beta_1 = 0.9, \\beta_2 = 0.95 \\)
Entropy Loss weight0000\\( 5 \\times 10^{-3} \\)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.324, + 0.446, + 0.677, + 0.461 + ], + "angle": 0, + "content": "Table 9. GigaTok configuration and default training details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.486, + 0.483, + 0.608 + ], + "angle": 0, + "content": "[69] is discussed for LFQ [69], its application to VQ tokenizers is less commonly explained. We provide a detailed derivation of the entropy loss specifically for VQ tokenizers. Mathematically, for quantization process from continuous vector \\(\\mathbf{z} \\in \\mathbb{R}^D\\) to quantized vector \\(\\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D\\) where \\(\\mathbf{c}_i\\) is the \\(i\\)-th codebook vector from codebook \\(\\mathbf{C} \\in \\mathbb{R}^{N \\times D}\\), we assume this process is statistical and follows the following distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.619, + 0.483, + 0.639 + ], + "angle": 0, + "content": "\\[\np (\\hat {\\mathbf {z}} = \\mathbf {c} _ {i} | \\mathbf {z}) \\triangleq \\operatorname {s o f t m a x} (- l _ {2} (\\mathbf {z}, \\mathbf {C})) [ i ] \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.648, + 0.484, + 0.801 + ], + "angle": 0, + "content": "where \\(l_{2}(\\mathbf{z},\\mathbf{C})\\in \\mathbb{R}^{N}\\) is the \\(L_{2}\\) distance between \\(\\mathbf{z}\\) and all the codebook vectors. Then, minimization of the quantization error can be partially achieved by minimizing the expectation of entropy \\(\\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}} |\\mathbf{z})]\\), which can be understood as maximizing the prediction confidence for \\(p(\\hat{\\mathbf{z}} |\\mathbf{z})\\). To encourage higher codebook utilization, we aim to make the average appearance probability of codebook vectors more uniform. This is achieved by maximizing the entropy \\(H(\\hat{\\mathbf{z}})\\). 
Therefore, the optimization of the two entropy terms leads to the final entropy loss equation:" + }, + { + "type": "equation", + "bbox": [ + 0.182, + 0.812, + 0.483, + 0.831 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {e n t r o p y}} = \\mathbb {E} _ {\\mathbf {z}} [ H (\\hat {\\mathbf {z}} | \\mathbf {z}) ] - H (\\hat {\\mathbf {z}}) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.485, + 0.903 + ], + "angle": 0, + "content": "In practice, to calculate \\( H(\\hat{\\mathbf{z}}) \\), we estimate \\( p(\\hat{\\mathbf{z}} = \\mathbf{c}_i) \\) by \\( p(\\hat{\\mathbf{z}} = \\mathbf{c}_i) = \\mathbb{E}_{\\mathbf{z}}[p(\\hat{\\mathbf{z}} = \\mathbf{c}_i|\\mathbf{z})] \\). Note that entropy loss is not our contribution. We only provide a detailed definition of entropy loss in VQ scenarios for better understanding." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.486, + 0.908, + 0.775 + ], + "angle": 0, + "content": "Additional implementation details. To stabilize the training of our tokenizer with a hybrid architecture, we initially use a shortcut feature reconstruction trick at the first 15k iterations of the tokenizer training. But we later found that this trick can be replaced with a simple 1-epoch learning rate warmup combined with entropy loss [15, 69]. Specifically for this trick, we additionally give the output feature of the CNN encoder to the CNN decoder directly to be trained for reconstruction, and also align the output feature of the Transformer decoder to the output feature of the CNN encoder, besides the original training objectives. Note that this strategy is complex and can even hinder performance for XL-XXL tokenizers. We recommend using the learning rate warmup combined with entropy loss [15, 69] instead, for both XL-XXL tokenizer and the smaller ones. Additionally, we utilize the rotation trick [17] for all tokenizers, though we observe its effect on performance to be limited for our tokenizer. 
The implementation of the semantic regularization is partially inspired by REPA [71]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.785, + 0.853, + 0.803 + ], + "angle": 0, + "content": "D. Full Evaluation Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Here we present the full evaluation results for the tokenizers and downstream AR models, as summarized in Tab. 10. We observe that scaling up visual tokenizers consistently improves the reconstruction quality across multiple metrics. Interestingly, for the 1.4B AR model, the lowest gFID is obtained without applying any CFG. This phenomenon is" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.092, + 0.089, + 0.947, + 0.235 + ], + "angle": 0, + "content": "
TokenizerParam. rFID↓LPIPS↓PSNR↑SSIM↑AR ModelParam. gFID↓Acc.↑IS↑Precision↑Recall↑
LlamaGen-Tok. [50]72M2.19-20.790.675LlamaGen-B [50]111M5.46-193.610.830.45
GigaTok-S-S136M1.010.222620.740.670LlamaGen-B (1d) [50]111M4.0562.6240.610.810.51
GigaTok-S-B232M0.890.212120.930.677LlamaGen-B (1d) [50]111M3.8362.9233.310.830.51
GigaTok-B-L622M0.810.205921.210.685LlamaGen-B (1d) [50]111M3.2667.6221.020.810.56
LlamaGen-XXL (1d) [50]1.4B2.03*69.4238.520.800.63
GigaTok-B-L622M0.51‡0.20621.320.691LlamaGen-B (1d) [50]111M3.3367.7265.430.800.56
GigaTok-XL-XXL2.9B0.790.194721.650.699LlamaGen-B (1d) [50]111M3.1572.0224.280.820.55
LlamaGen-XXL (1d) [50]1.4B1.98*74.0256.760.810.62
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.247, + 0.908, + 0.276 + ], + "angle": 0, + "content": "Table 10. Full results for our tokenizers and AR models on ImageNet \\( {256} \\times {256} \\) . For gFID,we present the lowest value between w/ or w/o CFG scenarios. \\( \\ddagger \\) : Using frozen DINO [7] for discriminator,which largely improves rFID. \\( \\star \\) : Without classifier-free-guidance." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.302, + 0.482, + 0.361 + ], + "angle": 0, + "content": "also observed in the concurrent work FlexTok [3], despite significant differences between GigaTok and FlexTok. We hypothesize that semantic regularization might be the primary contributing factor for this phenomenon." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.363, + 0.483, + 0.604 + ], + "angle": 0, + "content": "Discussion on Scaling and Enhancing the Discriminator. Recently, VAR [53], ImageFolder [36], and the concurrent work UniTok [41] have begun leveraging DINO-based discriminators [7, 43] to enhance tokenizer training, achieving impressive improvements in rFID scores. We have also experimented with the same DINO discriminator configuration as VAR. Our results indicate that although rFID scores improve, the downstream generation quality improvements are less significant, as detailed in Tab. 10. Furthermore, when applying the DINO discriminator to XL-XXL tokenizers, we observed that adversarial training frequently encounters instability. Specifically, a strong discriminator quickly learns to distinguish reconstructed samples, diminishing the benefits of adversarial training and leading to blurry artifacts. We leave further exploration of discriminator scaling and enhancement strategies for future work." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.619, + 0.453, + 0.635 + ], + "angle": 0, + "content": "E. 
Training Tokenizers for More Iterations" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.644, + 0.483, + 0.749 + ], + "angle": 0, + "content": "While we largely resolve the reconstruction vs. generation dilemma regarding tokenizer model scaling, this challenge persists for tokenizer training duration scaling. To illustrate this phenomenon, we train five S-S tokenizers ranging from 40 to 120 epochs using a cosine learning rate scheduler, as detailed in Tab. 9. The results are presented in Fig. 12." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.884 + ], + "angle": 0, + "content": "When extending tokenizer training iterations, reconstruction quality consistently improves. However, downstream generation quality initially improves but subsequently degrades with further increases in tokenizer training duration. Additionally, the validation loss of AR probing continuously rises with longer tokenizer training, regardless of semantic regularization. This trend suggests an increasing complexity in the tokenizer's latent space as the training duration extends." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.886, + 0.483, + 0.901 + ], + "angle": 0, + "content": "We hypothesize that data scaling may alleviate this is-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.302, + 0.907, + 0.363 + ], + "angle": 0, + "content": "sue, and leave it for future exploration. In practice, allocating computational resources toward model scaling rather than extended training duration may yield better tokenizer performance." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.379, + 0.871, + 0.396 + ], + "angle": 0, + "content": "F. Linear Probing Accuracy of Tokenizers" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.405, + 0.907, + 0.631 + ], + "angle": 0, + "content": "We show that the linear probing accuracy of the tokenizer encoders may not necessarily indicate the performance of downstream AR models. 
We utilize the intermediate checkpoints during the training of B-L and XL-XXL tokenizers for evaluation. As shown in Fig. 13, the XL-XXL tokenizer encoder presents an overfitting trend in terms of tokenizer encoder linear probing accuracy. However, this overfitting trend is not reflected in AR Probing linear probing accuracy or gFID. Therefore, the linear probing accuracy of the tokenizer encoders may not be a good indicator of downstream model performance. Similarly, a concurrent work UniTok [41], also points out that the performance of the tokenizer encoder in terms of zero-shot ImageNet classification accuracy may not necessarily reflect the visual understanding ability of downstream LLMs trained on the tokenizer." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.632, + 0.906, + 0.738 + ], + "angle": 0, + "content": "The abnormality for large tokenizers reveals that the linear probing accuracy of the tokenizer is not necessarily a good indicator for downstream generation models. Since we care more about the representation learning for downstream models than for the tokenizers, using AR Probing as a direct evaluating method is better than indirect tokenizer linear probing accuracy." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.754, + 0.868, + 0.77 + ], + "angle": 0, + "content": "G. More Discussions About Related Work" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.906, + 0.9 + ], + "angle": 0, + "content": "TiTok [70] explores the use of 1D Transformer-based tokenizers under a high compression rate setting. TiTok seminally explores the model scaling of visual tokenizers and uses larger tokenizers for higher compression rate. However, the reconstruction vs. generation dilemma for scaling tokenizers is not solved in TiTok. As a result, the best generation model in TiTok is still trained on its smallest tokenizer variant." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.327, + 0.092, + 0.732, + 0.109 + ], + "angle": 0, + "content": "w/o semantic regularization w/ semantic regularization" + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.112, + 0.237, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.112, + 0.396, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.112, + 0.58, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.584, + 0.112, + 0.731, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.735, + 0.112, + 0.902, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.245, + 0.907, + 0.286 + ], + "angle": 0, + "content": "Figure 12. Training duration scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.313, + 0.379, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.333, + 0.216, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.333, + 0.348, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.333, + 0.475, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.452, + 0.483, + 0.522 + ], + "angle": 0, + "content": "Figure 13. The linear probing accuracy of tokenizer encoders does not necessarily reflect downstream model performance. As the training proceeds, the XL-XXL tokenizer encoder presents an overfitting trend measured by linear probing accuracy, but downstream model performances consistently improve." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.551, + 0.483, + 0.732 + ], + "angle": 0, + "content": "ViTok [21] is a concurrent work which has explored the effect of model scaling for VAE [28]. ViTok evaluates its VAE models in terms of both reconstruction and downstream diffusion generation performance. While having a very different setting from GigaTok, ViTok similarly finds that asymmetric design is better for VAEs. While ViTok suggests that small encoders are optimal, we point out that in our setting scaling encoders is also beneficial. Notably, the reconstruction vs. generation dilemma for scaling visual tokenizers is not solved in ViTok. We hypothesize that adding semantic regularization may similarly help solve the tokenizer scaling dilemma for VAEs, but leave it for future study." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.734, + 0.483, + 0.869 + ], + "angle": 0, + "content": "MAGVIT-v2 [69] introduces LFQ to enhance discrete tokenizers. It also introduces the entropy penalty for tokenizer training, which is shown to be important for training large-scale tokenizers in our work. Instead of tokenizer model scaling, MAGVIT-v2 focuses more on scaling the codebook size of tokenizers. While codebook dimension and codebook size are important bottlenecks for visual tokenizers, we point out that model size scaling is also an important way for improving visual tokenizers." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.901 + ], + "angle": 0, + "content": "ImageFolder [36] utilizes two branches for image encoding to handle high-level semantic information and low-level" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.313, + 0.905, + 0.356 + ], + "angle": 0, + "content": "visual details respectively. It seminally utilizes semantic alignment to enhance the learned representation of tokenizers." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.358, + 0.907, + 0.48 + ], + "angle": 0, + "content": "VA-VAE [65] tames the reconstruction vs. generation dilemma in increasing latent dimensions for continuous VAE [28, 29]. VA-VAE improves the reconstruction-generation Pareto Frontier by introducing vision foundation model alignment loss. In contrast, we seek continuous improvements in both reconstruction and generation by scaling tokenizers. Semantic regularization serves different purposes in the two works." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_origin.pdf b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eab686f6db630ddafb139cf4c8e7248854cd119d --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/eda122d9-2e32-4d6c-a34a-34f5d8cbbcb6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca73d0238a2645fc2c81cdf6a781856fec64d94257c9eb51bac0d7dc28a759f4 +size 1497006 diff --git a/data/2025/2504_08xxx/2504.08736/full.md b/data/2025/2504_08xxx/2504.08736/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3e91a38630147eb1e9cb147fba3cef8a2e622d67 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/full.md @@ -0,0 +1,481 @@ +# GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation + +Tianwei Xiong + +Jun Hao Liew2 + +Zilong Huang + +Jiashi Feng2 + +Xihui Liu $^{1\dagger}$ + +1The University of Hong Kong + +2ByteDance Seed + +Project page: https://silentview.github.io/GigaTok/ + +# Abstract + +In autoregressive (AR) image generation, visual tokenizers compress images into compact discrete latent tokens, enabling efficient training of downstream autoregressive models for visual generation via next-token prediction. 
While scaling visual tokenizers improves image reconstruction quality, it often degrades downstream generation quality—a challenge not adequately addressed in existing literature. To address this, we introduce GigaTok, the first approach to simultaneously improve image reconstruction, generation, and representation learning when scaling visual tokenizers. We identify the growing complexity of latent space as the key factor behind the reconstruction vs. generation dilemma. To mitigate this, we propose semantic regularization, which aligns tokenizer features with semantically consistent features from a pre-trained visual encoder. This constraint prevents excessive latent space complexity during scaling, yielding consistent improvements in both reconstruction and downstream autoregressive generation. Building on semantic regularization, we explore three key practices for scaling tokenizers: (1) using 1D tokenizers for better scalability, (2) prioritizing decoder scaling when expanding both encoder and decoder, and (3) employing entropy loss to stabilize training for billion-scale tokenizers. By scaling to 3 billion parameters, GigaTok achieves state-of-the-art performance in reconstruction, downstream AR generation, and downstream AR representation quality. + +# 1. Introduction + +Autoregressive (AR) language models (LM) have emerged as a promising approach for visual generation [15, 50, 66, 69], driven by their proven scalability [2, 5, 14, 19, 37, 51, 52, 54, 55] and the potential for unified multimodal modeling [12, 45, 62]. The AR image generation framework consists of a visual tokenizer and a downstream AR generator. 
The tokenizer encodes images into discrete tokens, trained + +![](images/b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg) + +![](images/274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg) + +![](images/4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg) + +![](images/87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg) + +![](images/77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg) + +![](images/353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg) +Figure 1. Reconstruction vs. generation dilemma: Naively scaling visual tokenizers achieves better reconstruction but degrades downstream autoregressive (AR) generation. In contrast, GigaTok achieves better performance for both reconstruction and generation as tokenizers scale up. + +![](images/bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg) + +![](images/9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg) +Better generation with larger tokenizer + +![](images/67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg) + +![](images/ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg) + +with image reconstruction supervision, while the AR generator models the distribution of these discrete tokens through next-token prediction. The image tokenizer plays a pivotal role in AR visual generation, providing a compact and expressive latent space that enables effective generative modeling by downstream AR models. + +Despite its pivotal role, scaling of visual tokenizer is rarely explored in the literature. In fact, unlike the downstream AR models whose scalability has been widely validated [12, 30, 60, 62], scaling the visual tokenizer presents a significant challenge. Specifically, there exists a reconstruction vs. generation dilemma, where scaling tokenizer improves reconstruction fidelity but degrades downstream generation quality, as shown in Fig. 1. 
This dilemma is also observed in prior works [13, 21]. In this work, we seek to overcome this limitation and explore strategies for effectively scaling tokenizers to enhance both reconstruction and generation performance. + +To investigate the root cause of this dilemma, we propose + +![](images/4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg) +Figure 2. The 2.9B GigaTok achieves SOTA autoregressive image generation with a 1.4B AR model on ImageNet $256\times 256$ resolution. + +an AR probing scheme that trains a lightweight downstream generative AR model to monitor the tokenizer's training process. Surprisingly, we find that as tokenizers scale, the downstream AR model struggles more to learn the resulting token distribution, as evidenced by the increasing AR generation loss. This suggests that the larger tokenizers produce a more complex token space, making it increasingly difficult for AR models to learn effectively. + +To address this challenge, we introduce pre-trained visual representation models (e.g. DINOv2 [43]) to regularize tokenizers. Specifically, we leverage a semantic regularization loss during tokenizer training, encouraging high similarity between tokenizer features and the pre-trained model features. Such regularization helps constrain the latent space complexity, preventing the tokenizer from learning overly complicated latent token dependencies that hinder downstream AR generative modeling. Moreover, we design a vector-quantized (VQ) tokenizer with a hybrid CNN-Transformer architecture as the backbone, suitable for both 1D and 2D tokenizers, and explore best practices for scaling tokenizers: (1) 1D tokenizers exhibit better scalability compared to 2D tokenizers; (2) Asymmetric model scaling, prioritizing decoder scaling over encoder scaling, proves effective; (3) Entropy loss [69] becomes crucial for convergence when training tokenizers with billion-level parameters. 
With our semantic regularization and three key scaling strategies, we effectively scale GigaTok to 3 billion parameters, overcoming the reconstruction vs. generation dilemma. + +We summarize our contributions as follows: + +- We identify that the reconstruction vs. generation dilemma in tokenizer scaling stems from increased latent space complexity in larger tokenizers. To address this, we propose semantic regularization, effectively mitigating the dilemma and enabling tokenizer scaling. +- We explore best practices for scaling tokenizers, including 1D tokenizers with hybrid CNN-Transformer archi + +tecture, asymmetric encoder-decoder scaling, and entropy loss for billion-scale tokenizers. + +- Our GigaTok is the first tokenizer scaled to 3B, achieving state-of-the-art reconstruction, downstream AR generation, and downstream AR representation on ImageNet. + +# 2. Related Work + +Image tokenizers. Image tokenizers map image inputs into discrete [15, 56, 66] or continuous [28] tokens which can be modeled by downstream generative models. For discrete tokenizers, Vector Quantization (VQ) [15, 56, 66] is dominantly adopted. Recently, new quantization methods [49, 69, 75, 76] have also been proposed for better scaling of codebook size. However, how to properly scale up tokenizer models is insufficiently studied in existing literature. ViT-VQGAN [66] and TiTok [70] utilize transformer architecture to enable convenient scaling of tokenizers, but end up training their best generative models on smaller tokenizer versions. A concurrent work, ViTok [76], suggests de-prioritizing VAE scaling due to its less predictable effect for downstream diffusion models. We observe a similar reconstruction vs. generation dilemma in scaling discrete tokenizers, and provide our analysis and solution to it. + +Autoregressive Visual Generation. 
Autoregressive visual generative models [33, 38, 40, 49, 50, 56, 58, 60, 66] follow the next-token-prediction (NTP) approach of LLMs, enabling the leverage of advancements in LLMs and simplifying the path to unified multi-modal generation. Other variants utilize visual-specific paradigms such as mask image modeling [8, 61, 69, 70] and next-scale-prediction [36, 53] for better performance. We reveal that scaling tokenizers helps NTP AR models to be comparable to these variants. + +Semantic Guidance for Visual Generative Models and Tokenizers. The guidance from visual foundation models [7, 23, 43, 46, 72] has been used to improve training + +convergence speed and quality [65, 71] of visual generative models, as well as enhancing representation quality or downstream performance of visual tokenizers [9, 10, 18, 36, 41, 59, 63–65, 68, 73, 76, 77]. REPA [71] presents impressive performance improvements brought by a simple representation alignment strategy, and recently, VA-VAE [65] shows the significant benefits of semantic guidance to the reconstruction-generation Pareto Frontier of VAEs. Different from existing work, GigaTok novelly reveals the critical role of semantic regularization for resolving the reconstruction vs. generation dilemma in scaling visual tokenizers. + +# 3. Pilot Study + +We first introduce AR Probing as a proxy to effectively monitor the tokenizer's effectiveness for downstream generation (Sec 3.1), followed by a pilot experiment that investigates the reconstruction vs. generation challenges when naively scaling visual tokenizers (Sec 3.2). + +# 3.1. AR Probing for Tokenizer Evaluation + +In autoregressive visual generation, the training of the tokenizer and downstream AR model are performed in separate stages. In the first stage, a visual tokenizer is trained to compress images into discrete tokens, optimized with reconstruction objective. 
In the second stage, the downstream generative model is trained based on the discrete tokens from the pre-trained tokenizer. However, a tokenizer that performs well in terms of reconstruction fidelity in the first stage may not necessarily lead to better performance for downstream generative models. Thus, it is crucial to evaluate the effectiveness of the trained tokenizers for downstream generation alongside its reconstruction quality. + +Despite its importance, assessing how a tokenizer influences downstream generation models can be computationally expensive. For example, sufficiently training a 343M parameter downstream AR generator takes 170 hours on 64 V100 GPUs. To address this challenge, we introduce AR Probing, inspired by Linear Probing in representation learning literature [11, 23]. The key idea is to use the performance of a small AR model as a proxy to reflect the performance trends of large-scale AR models. + +Specifically, we use the tokenizer to train a small Llama-style model [50, 54] (111M parameters) for 50 epochs, and evaluate its gFID [24], validation loss, and linear probing accuracy [11, 23] for a fair comparison between different tokenizers. Training the proposed AR Probing model for evaluating tokenizers is $10 \times$ more efficient than training the original 343M downstream AR model. Our experiments in Sec. 5.1 (Fig. 6) demonstrate that the trends observed with AR Probing align with the performance of the large-scale AR models after sufficient training. + +gFID. The generation FID [24] of AR probing indicates the overall image generation performance of the two-stage + +![](images/957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg) +Figure 3. Scaling trend for vanilla 1D tokenizers. As the model size increases, the reconstruction quality of vanilla tokenizers improves but the downstream AR Probing gFID consistently degrades. 
The increasing AR Probing validation loss indicates that scaling vanilla tokenizers results in a more complex latent space, making it difficult for AR models to learn effectively. + +![](images/9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg) + +![](images/ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg) + +framework. It reflects both the reconstruction fidelity of the tokenizer and how well the downstream AR probing model can learn the dependency of the visual tokens (i.e., learnability of the token distribution). + +Validation loss. We use the validation loss of the AR probing model to measure the learnability of the latent tokens as a disentangled factor. The validation loss is calculated as an average of the token-wise cross-entropy loss in the next-token-prediction paradigm on ImageNet [48] 50k validation set. With the same vocabulary size, the same number and structure of visual tokens, and the same AR probing model, larger validation loss indicates a latent space that is more difficult for the AR model to learn. Therefore, we use validation loss to reflect the latent space complexity and learnability for AR models. + +Linear probing accuracy. Beyond visual generation quality, we also investigate whether scaling tokenizers will lead to better visual representations of AR models, which may provide inspiration for future research in unified multimodal understanding and generation with AR models. To assess the representation quality, we adopt the standard practice [11, 23] of linear probing accuracy using features from the middle Transformer layer of the AR probing model. + +# 3.2. Naively Scaling Tokenizers Does Not Work + +To study the challenges when naively scaling visual tokenizers, we train three vector-quantized tokenizers1 on ImageNet [48] at $256 \times 256$ resolution with increasing model sizes. As shown in Fig. 
3, as the tokenizer size increases, although the reconstruction quality (rFID) consistently improves, the AR generation performance (gFID) significantly degrades. This highlights the reconstruction vs. generation dilemma in tokenizer scaling. Moreover, we observe that the validation loss of AR Probing consistently increases as the tokenizers scale, indicating that larger tokenizers lead to complicated token dependencies that are more difficult for the AR model to learn. This observation motivates us to design the semantic regularization to constrain the latent space complexity of the tokenizer and therefore break the + +reconstruction vs. generation dilemma in Sec. 4.2. + +# 4. GigaTok + +In this section, we introduce the model structure and training strategies for our scalable visual tokenizer, GigaTok. In Sec. 4.1, we present a tokenizer backbone supporting 1D and 2D token structures, and discuss the asymmetric scaling strategies for the encoder and decoder. In Sec. 4.2, we introduce semantic regularization, which breaks the reconstruction vs. generation dilemma by regularizing the complexity of the latent space with pre-trained visual representations. In Sec. 4.3, we show how entropy loss [69] facilitates the convergence of billion-scale tokenizers. + +# 4.1. Architecture + +The CNN [32] architectures have been the dominant choices for image tokenizers [15, 40, 69, 76] due to their effectiveness in capturing fine-grained local details. Yet, Transformers are more scalable architectures with less inductive bias. Thus, we design a vector quantized tokenizer backbone with a hybrid architecture that combines CNN [15, 32] and Transformer [6, 13, 57] for encoder and decoder (Fig. 4). Specifically, our encoder consists of a series of CNN blocks that progressively downsamples the input image by a factor of $p$ , followed by Transformer layers and a vector quantizer to produce discrete latent codes. 
Similarly, our decoder consists of multiple Transformer layers, followed by CNN decoders which upsamples the features to obtain the reconstructed image2. Our tokenizer architecture can be adapted to both 1D and 2D tokenizers by using different Transformer designs introduced in the next two paragraphs. + +2D tokenizers with ViT. For 2D tokenizers, the Transformers in both tokenizer encoder and decoder are implemented by ViT [13] architecture. 2D structures of the latent features and tokens are preserved throughout the tokenizer. + +1D tokenizers with Q-Former. For 1D tokenizers, we implement the Transformer modules in both encoder and decoder as Q-Formers [6, 34]. The Q-Former in the encoder employs 1D queries, transforming 2D input features into 1D latent tokens. The Q-Former in the decoder utilizes 2D queries to transform 1D latent tokens back to 2D features, which are then passed to the CNN decoder to reconstruct images. The 1D tokenizers remove the 2D inductive bias and demonstrate better scalability than 2D tokenizers in our experiments (Sec. 5.5). + +Asymmetric encoder-decoder scaling. Since the decoder faces the more challenging task of reconstructing images from lossy latent codes, we adopt an asymmetric design for more efficient parameter allocation. Specifically, we scale both the encoder and decoder, while ensuring that + +Figure 4. GigaTok architecture and semantic regularization. +![](images/0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg) +Top: We use a hybrid CNN-Transformer design for our visual tokenizer. The transformer layers are implemented with ViT for 2D tokenizer and Q-Former for 1D tokenizer. Bottom: We use a frozen DINOv2 [43] image encoder for semantic regularization. + +the decoders are always larger than the encoders. In practice, we maintain the same and fixed size for the CNN encoder/decoder and only increase the depth and width of the Transformer modules for scaling. + +# 4.2. 
Semantic Regularization + +In our pilot study (Sec. 3.2), the latent space complexity significantly increases as the tokenizer scales, which potentially leads to worse downstream AR generation for larger tokenizers. We hypothesize that larger tokenizers tend to capture excessive fine-grained low-level details for better reconstruction, resulting in overly complex latent token distributions, which makes it harder for AR models to learn the token dependencies effectively. + +To address this, we introduce semantic regularization to guide the tokenizer to encode a more semantically consistent latent space, which is less complex and easier for downstream generative modeling. Specifically, we introduce a simple semantic regularization term alongside the tokenizer training objective. The regularization aligns the intermediate features of the tokenizer decoder with the feature representations extracted from pre-trained frozen DINOv2 [43]. + +Mathematically, let $f^{\mathrm{dec},l}$ be the output feature of the $l$ -th layer of the Transformer decoder, $f^{\mathrm{DINO}}$ be the semantic features of a pretrained image encoder (here DINOv2-B [43]). The semantic regularization can be represented as: + +$$ +\mathcal {L} _ {\text {r e g}} = \frac {1}{N} \sum_ {n = 1} ^ {N} \sin \left(f _ {n} ^ {\mathrm {d e c}, l}, \phi \left(f _ {n} ^ {\mathrm {D I N O}}\right)\right) \tag {1} +$$ + +where $N$ is the batch size, $n$ is the image index, $\mathrm{sim}(\cdot ,\cdot)$ is a cosine similarity function, and $\phi (\cdot)$ is an MLP that projects decoder feature $f^{\mathrm{dec},l}$ to match the channel dimension of $f^{\mathrm{DINO}}$ . When training VQ tokenizers, we add the semantic regularization to the original VQGAN [15, 50] objectives: + +![](images/2a947eeb75e4a83e727c41d1f9c1ee05719590673ed1bddb84605054f0989839.jpg) +Figure 5. Training curves for 2.9B XL-XXL tokenizers with and without entropy loss. A 2.9B tokenizer does not converge without entropy loss. 
The entropy loss encourages high codebook usage and stabilizes training loss. + +$$ +\mathcal {L} _ {\text {t o t a l}} = \mathcal {L} _ {\mathrm {v q g a n}} + \lambda \mathcal {L} _ {\text {r e g}}, \tag {2} +$$ + +and we empirically set $\lambda = 0.5$ in this work. Here $\mathcal{L}_{\mathrm{vqgan}}$ is a combination of multiple losses, including $\mathcal{L}_{\mathrm{recon}}$ , the $l_{2}$ reconstruction loss on image pixels, $\mathcal{L}_{\mathrm{percp}}$ , the perceptual loss [27, 74], $\mathcal{L}_{\mathrm{GAN}}$ , PatchGAN [26] adversarial loss, and $\mathcal{L}_{\mathrm{VQ}}$ [15, 66] the VQ codebook loss. + +# 4.3. Entropy Loss for Billion-Level Tokenizers + +When training a 2.9B tokenizer, we find that using the same training recipe as the 622M tokenizer leads to convergence failure for both perceptual loss and reconstruction loss, and consistently low codebook usage. We hypothesize that low codebook usage accounts for the convergence difficulty. To address this, we incorporate entropy penalty [67, 69] to encourage higher codebook utilization: + +$$ +\mathcal {L} _ {\text {e n t r o p y}} = \mathbb {E} _ {\mathbf {z}} [ H (\hat {\mathbf {z}} | \mathbf {z}) ] - H (\hat {\mathbf {z}}) \tag {3} +$$ + +where $H(\cdot)$ denotes the Shannon entropy, $\mathbf{z} \in \mathbb{R}^D$ is the input for quantizer to be quantized to $\hat{\mathbf{z}} = \mathbf{c}_i \in \mathbb{R}^D$ and $\mathbf{c}_i$ is the $i$ -th codebook vector. $\mathbb{E}_{\mathbf{z}}[H(\hat{\mathbf{z}}|\mathbf{z})]$ penalizes the uncertainty in quantization to reduce quantization error, and $-H(\hat{\mathbf{z}})$ encourages the codebook vectors to be selected more uniformly across the entire codebook. The detailed derivation can be found in our supp. We find that the entropy penalty addresses the convergence difficulty of large tokenizers. As shown in Fig. 
5, introducing entropy loss to the 2.9B tokenizer enables the codebook usage to quickly reach a high level, and the loss converges properly3. + +# 5. Experiments + +# 5.1. Settings + +For scaling up visual tokenizers, we follow the architecture configurations for the Transformers in GigaTok tokenizers as summarized in Tab. 1. We evaluate the tokenizers from three perspectives: reconstruction, downstream AR generation, and downstream AR representation quality. We use + +
| Type | Enc./Dec. | Params. | Blocks | Heads | Dim. |
| --- | --- | --- | --- | --- | --- |
| 1D Tok. | S | 26M | 6 | 8 | 512 |
| 1D Tok. | B | 115M | 12 | 12 | 768 |
| 1D Tok. | L | 405M | 24 | 16 | 1024 |
| 1D Tok. | XL | 948M | 36 | 20 | 1280 |
| 1D Tok. | XXL | 1870M | 48 | 24 | 1536 |
| 2D Tok. | S | 19M | 6 | 8 | 512 |
| 2D Tok. | B | 86M | 12 | 12 | 768 |
| 2D Tok. | L | 329M | 24 | 16 | 1024 |
+ +Table 1. Architectures of the transformer variants for tokenizer encoder/decoder parts in our experiments. We use Q-Former [6, 34] for 1D tokenizers and ViT [13] for 2D tokenizers. + +![](images/f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg) +Figure 6. Correlation between AR Probing Performance and Larger AR models. For 3 tokenizers: S-S, S-L, and B-L, we present that as the tokenizer improves, the performance improvements of AR Probing correlate to the performance improvements of larger AR models. Therefore, the AR Probing can effectively indicate how the tokenizer affects downstream larger AR models with limited computational costs. + +![](images/18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg) + +rFID and LPIPS [74] to evaluate reconstruction fidelity, gFID to evaluate generation performance, and linear probing to evaluate the representation quality of the downstream AR model. Our downstream AR models are LlamaGen [50] with 1D absolute positional embedding. Our scaling experiments (Sec. 5.2) and ablation study (Sec. 5.3) use AR Probing (111M AR model described in Sec.3.1) validation loss, gFID, and linear probing to reflect the learnability of tokens, generation performance, and representation quality, respectively. While in the system-level comparison (Sec. 5.4), we train larger 1.4B AR models for comparison with previous work. More details are in the supplementary material. + +Effectiveness of AR Probing. As shown in Fig. 6, AR Probing performances including gFID and linear probing accuracy align with the larger LlamaGen-XL [50] model results. Therefore, we use AR Probing throughout the following experiments except for the system-level comparison. + +# 5.2. Scaling with Semantic Regularization + +We demonstrate that our proposed semantic regularization resolves the reconstruction vs. generation dilemma in scaling tokenizers. + +Model scaling with semantic regularization. Results are shown in Fig. 7. 
(1) Semantic regularization improves the reconstruction fidelity, indicated by lower rFID. (2) More importantly, the AR Probing validation loss and gFID de + +![](images/9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg) +Figure 7. Scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. By semantic regularization, GigaTok resolves the reconstruction vs. generation dilemma for tokenizer scaling in contrast to the vanilla version without semantic regularization. Moreover, GigaTok consistently improves the representation quality of downstream AR models by scaling up visual tokenizers. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis. + +![](images/2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg) +w/o semantic regularization w/ semantic regularization + +![](images/aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg) + +![](images/4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg) + +![](images/368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg) + +![](images/a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg) +Figure 8. Visualization of tokenizer features with and without semantic regularization. We compute PCA among the tokenizer features of a group of images of the same "golden retriever" class and visualize the first 3 PCA components. We observe that the latent space of vanilla tokenizers shows inconsistent features both within a single image or across multiple semantically similar images. In contrast, GigaTok encodes images with semantic consistency and thus reduces the latent space complexity for AR models. + +grades for larger tokenizers without semantic regularization, showing the reconstruction vs. generation dilemma. 
The dilemma is addressed with semantic regularization, evidenced by the relatively constrained validation loss and consistently decreasing gFID. (3) The Linear Probing results show that semantic regularization helps AR models to learn better representations as the tokenizer model scales up. + +Visualization for the tokenizer feature space. We visualize the first 3 PCA components of the tokenizer features from the first Transformer decoder layer for a group of images. As shown in Fig. 8, we find the vanilla tokenizer encodes a latent space with limited semantic consistency, which potentially impairs its learnability for downstream AR models. In contrast, GigaTok presents semantically consistent patterns (Fig. 8), indicating a meaningful and consistent latent space. + +# 5.3. Asymmetric 1D Tokenizer is More Scalable + +Tokenizer decoder deserves more parameters. To determine whether the decoder or encoder should be prioritized when scaling up, we compare S-B $^4$ and B-S tokenizers in Tab. 2, both trained under the same setting for 100 + +
| Enc./Dec. Size | rFID↓ | LPIPS↓ | gFID↓ | Lin Acc.↑ |
| --- | --- | --- | --- | --- |
| B-S | 0.98 | 0.221 | 6.56 | 64.5 |
| S-B | 0.94 | 0.214 | 5.65 | 59.8 |
| S-L | 0.83 | 0.206 | 5.19 | 60.6 |
| B-L | 0.81 | 0.206 | 4.82 | 66.9 |
+ +Table 2. The results for scaling encoder/decoder. Prioritizing the scaling of decoders benefits downstream generation more than scaling encoders (S-B v.s. B-S). But scaling encoders can still bring significant improvements (S-L v.s. B-L). + +![](images/bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg) +Figure 9. Scalability comparison for 1D and 2D tokenizers. Using the same training setting, 1D tokenizers shows better reconstruction (rFID) and downstream representation quality (AR Probing: Lin Acc.). For downstream generation (gFID), 1D tokenizers present a steeper improving trend than 2D tokenizers. + +epochs. Our results show that scaling decoders, rather than encoders, leads to greater improvements in both reconstruction and downstream generation, suggesting that decoder scaling should be prioritized. + +Scaling tokenizer encoder is also important. While prioritizing the scaling of tokenizer decoders yields significant benefits, we also find that scaling tokenizer encoders can further enhance downstream models. In Tab. 2, we show that a B-L tokenizer gains significant improvements compared to an S-L tokenizer. Therefore, we recommend scaling both encoders and decoders while maintaining a larger decoder than the encoder for optimal performance. + +
| Tokenizer | Tok. Type | Param. | #Tokens | rFID↓ | Generator Model | Param. | Type | gFID↓ | Acc.↑ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| *Continuous token modeling* | | | | | | | | | |
| VAE [47] | KL† | 55M | 4096 | 0.27 | LDM-4 [47] | 400M | Diff. | 3.60 | - |
| | | | | | DiT-XL/2 [44] | 675M | Diff. | 2.27 | - |
| SD-VAE [1] | KL† | 84M | 1024 | 0.62 | SiT-XL/2 [42] | 675M | Diff. | 2.06 | - |
| | | | | | SiT-XL/2 + REPA [71] | 675M | Diff. | 1.42 | 74.6 |
| VA-VAE [65] | KL | 70M | 256 | 0.28 | LightningDiT [65] | 675M | Diff. | 1.35 | - |
| VAE [35] | KL | 66M | 256 | 0.53 | MAR-H [35] | 943M | AR+Diff. | 1.55 | 60.0◇ |
| *Discrete token modeling* | | | | | | | | | |
| VQGAN [8] | VQ | 66M | 256 | 2.28 | MaskGIT [8] | 227M | Mask. | 6.18☆ | - |
| TiTok-S [70] | VQ | 72M | 128 | 1.71 | MaskGIT-UViT-L [4, 8] | 287M | Mask. | 1.97 | - |
| TiTok-L [70] | VQ | 641M | 32 | 2.21 | MaskGIT-ViT [8] | 177M | Mask. | 2.77 | - |
| B-AE-d32 [22] | LFQ | 66M | 256 | 1.69 | BiGR-XXL-d32 [22] | 1.5B | AR+Diff | 2.36 | - |
| | | | | | BiGR-XL-d32 [22] | 799M | AR+Diff | - | 69.8 |
| VAR-Tok. [53] | MSRQ† | 109M | 680 | 1.00‡ | VAR-d24 [53] | 1.0B | VAR | 2.09 | - |
| | | | | | VAR-d30 [53] | 2.0B | VAR | 1.92 | - |
| ImageFolder [36] | MSRQ | 176M | 286 | 0.80‡ | ImageFolder-VAR [36] | 362M | VAR | 2.60 | - |
| VQGAN [15] | VQ | 23M | 256 | 4.98 | Taming-Tran. [15] | 1.4B | AR | 15.78☆ | - |
| ViT-VQGAN [66] | VQ | 64M | 1024 | 1.28 | VIM-Large [66] | 1.7B | AR | 4.17☆ | - |
| RQ-VAE [33] | RQ | 66M | 256 | 3.20 | RQTran. [33] | 3.8B | AR | 7.55☆ | - |
| Open-MAGVIT2 [40] | LFQ | 133M | 256 | 1.17 | Open-MAGVIT2-XL [40] | 1.5B | AR | 2.53 | - |
| IBQ [49] | IBQ | 128M | 256 | 1.37 | IBQ-XXL [49] | 2.1B | AR | 2.05 | - |
| LlamaGen-Tok. [50] | VQ | 72M | 256 | 2.19 | LlamaGen-L [50] | 343M | AR | 3.81 | 40.5◇ |
| | | | | | LlamaGen-XXL [50] | 1.4B | AR | 3.09 | - |
| LlamaGen-Tok. [50] | VQ | 72M | 576 | 0.94 | LlamaGen-XXL [50] | 1.4B | AR | 2.34 | - |
| GigaTok-B-L | VQ | 622M | 256 | 0.51‡ | LlamaGen-B (1d) [50] | 111M | AR | 3.33 | 67.7 |
| GigaTok-S-S | VQ | 136M | 256 | 1.01 | LlamaGen-B (1d) [50] | 111M | AR | 4.05 | 62.6 |
| GigaTok-S-B | VQ | 232M | 256 | 0.89 | LlamaGen-B (1d) [50] | 111M | AR | 3.83 | 62.9 |
| GigaTok-B-L | VQ | 622M | 256 | 0.81 | LlamaGen-B (1d) [50] | 111M | AR | 3.26 | 67.6 |
| | | | | | LlamaGen-XXL (1d) [50] | 1.4B | AR | 2.03☆ | 69.4 |
| GigaTok-XL-XXL | VQ | 2.9B | 256 | 0.79 | LlamaGen-B (1d) [50] | 111M | AR | 3.15 | 72.0 |
| | | | | | LlamaGen-XXL (1d) [50] | 1.4B | AR | 1.98☆ | 74.0 |
+ +Table 3. System-level comparison for tokenizers and downstream generation models on ImageNet $256 \times 256$ . For gFID, we present the lowest value between w/ or w/o CFG scenarios. †: Training set includes data besides ImageNet. ‡: Using frozen DINO [7] for discriminator, which largely improves rFID. ☆: Without classifier-free-guidance. ◇: Data from BiGR [22]. + +1D tokenizers are more scalable than 2D tokenizers. We train S-S, S-B and B-L 1D/2D tokenizers with the same setting with semantic regularization. As shown in Fig. 9, 1D tokenizers consistently achieve better rFID and AR Probing linear probing accuracy than 2D tokenizers. For AR Probing gFID, the 1D tokenizers exhibit a steeper scaling trend, eventually surpassing 2D tokenizers as the model scales. We attribute the superior scalability of 1D tokenizers to the reduced inductive bias. + +# 5.4. System-level Comparison + +Experiment Settings. Using GigaTok for tokenization, we scale the training of LlamaGen [50] AR models on $256 \times 256$ ImageNet training set for 300 epochs to compare with other methods. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We provide + +the results of a B-L tokenizer trained with DINO discriminator [36, 53] to fairly compare rFID. But in practice we find DINO discriminator provides limited improvement for LPIPS and may affect the training stability of billion-scale tokenizers. Therefore, we exclude it from our main design. + +Results. As shown in Tab. 3, our 2.9B GigaTok achieves state-of-the-art reconstruction performance (rIFD) among all discrete tokenizers. Furthermore, with our 2.9B tokenizer, the downstream 1.4B AR model achieves state-of-the-art image generation performance (gFID) among LLM-style autoregressive next-token-prediction models. VAR [53] predicts images with next-scale prediction rather than next-token-prediction, which is less compatible with language models. 
Our model achieves comparable gFID to VAR [53] with a simple LLM-style downstream AR genera + +
Decoder\AR Model SizeBLXXL
B3.7%2.3%1.3%
L11.2%7.0%3.4%
XXL32.4%20.3%9.9%
+ +tor without incorporating vision-specific designs like VAR. Moreover, this 1.4B AR model trained on the 2.9B tokenizer achieves state-of-the-art linear probing accuracy via visual generative pretraining5. This indicates that our GigaTok helps the downstream generation model to learn better representations. The high-quality representation learned from generative pre-training may also help unify generation and understanding for future native multimodal models. + +# 5.5. Discussion and Ablation Study + +Table 4. Ratio of time consumption for tokenizer decoding during image generation. When we use a 2.9B XL-XXL tokenizer for a 1.4B LlamaGen-XXL AR model, the tokenizer decoding only takes $9.9\%$ of the total inference time. + +
Align. Layer lrFID↓LPIPS↓gFID↓Lin Acc.↑
21.060.2246.2663.4
31.010.2236.1061.9
41.070.2236.0758.6
+ +Table 5. Layer $l$ for semantic regularization (S-S tokenizer). Smaller $l$ brings better downstream AR model representations but can sacrifice reconstruction and downstream generation quality. We choose $l = 3$ by default for more balanced performance. + +
Sem. Enc.rFID↓LPIPS↓gFID↓Lin Acc.↑
CLIP [16, 46]0.910.2106.3561.4
SigLIP [72]0.920.2106.2056.7
DINOv2-B [43]0.850.2125.5564.4
+ +Discussion on generation costs. When generating an image, AR models take multiple passes to predict tokens, while tokenizers only need one forward pass. Therefore, the time consumption for decoding tokens to images is relatively small compared to AR models. We record the ratio of time spent on tokenizer decoding for different tokenizer/AR models in Tab. 4. For a 1.4B AR model, our largest 2.9B tokenizer takes only $\sim 10\%$ of the total inference time. + +Table 6. Ablation study for the choice of pretrained semantic encoders (S-B tokenizer). DINOv2-B delivers the best performance among all models. + +
Sem. Reg. λrFID↓LPIPS↓gFID↓Lin Acc.↑
0.251.280.2266.2757.0
0.501.220.2286.3958.6
0.751.270.2366.2958.6
1.001.380.2396.2762.5
+ +Table 7. Ablation Study for the semantic regularization weight (S-S tokenizer). A strong semantic regularization weight leads to worse reconstruction but better downstream representation. We choose $\lambda = 0.5$ by default for more balanced performance. + +Searching the best layer for semantic regularization. We search $l$ , the layer's index in the Transformer decoder before intermediate features are extracted to calculate semantic regularization in Eq. 1. As shown in Tab. 5, varying $l$ presents a trade-off between gFID and the Lin Acc. for AR Probing. Smaller $l$ means stricter regularization for the latent space so that the downstream generation models learn better representation. However, smaller $l$ also sacrifices generation quality. We choose $l = 3$ for a more balanced rFID, gFID, and linear probing accuracy for all tokenizers. + +Exploring pretrained semantic encoder choices. We compare CLIP (DFN) [16, 46], SigLIP-400M [72] and DINOv2-B [43] as the source of semantic regularization for S-B tokenizers. As shown in Tab. 6, utilizing DINOv2-B as the semantic encoder for regularization produces the best tokenizer for reconstruction, downstream class conditional generation and representation quality. + +Exploring weights for semantic regularization. We study the effects of different regularization weights $\lambda$ (Eq. 2), from 0.25 to 1.00. As shown in Tab. 7, a large $\lambda$ (0.75, 1.00) will damage the reconstruction quality but benefits the linear probing accuracy, whereas smaller $\lambda$ (0.25) results in suboptimal rFID and linear probing accuracy. We choose the more balanced $\lambda = 0.5$ as a default for all tokenizers. + +# 6. Conclusion + +In this work, we study and address the reconstruction vs. generation dilemma for scaling visual tokenizers. We identify that the dilemma stems from increasing latent space complexity in larger tokenizers. 
We propose semantic regularization to effectively regularize the tokenizer latent space by injecting pre-trained representations to align with tokenizer features in training. The semantic regularization, together with several key practices we explored, leads to the first 3B tokenizer, GigaTok, that achieves state-of-the-art reconstruction, downstream AR generation, and downstream AR representation quality. Please refer to discussions on limitations and future work in supplementary materials. + +# Acknowledgments + +This work is partially supported by the National Natural Science Foundation of China (No. 62402406). + +The authors also sincerely thank Qihang Yu and Liang-Chieh Chen for their valuable discussions during the development of GigaTok. + +# References + +[1] stabilityai/sd-vae-ft-ema. https://huggingface.co/stabilityai/sd-vae-ft-ema, 2023.7 +[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 1 +[3] Roman Bachmann, Jesse Allardice, David Mizrahi, Enrico Fini, Oğuzhan Fatih Kar, Elmira Amirloo, Alaaeldin El-Nouby, Amir Zamir, and Afshin Dehghan. Flextok: Resampling images into 1d token sequences of flexible length. arXiv preprint arXiv:2502.13967, 2025. 3 +[4] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22669-22679, 2023. 7 +[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. 1 +[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 
End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 4, 5, 1 +[7] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 2, 7, 3 +[8] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2, 7 +[9] Hao Chen, Ze Wang, Xiang Li, Xineng Sun, Fangyi Chen, Jiang Liu, Jindong Wang, Bhiksha Raj, Zicheng Liu, and Emad Barsoum. Softvq-vae: Efficient 1-dimensional continuous tokenizer. arXiv preprint arXiv:2412.10958, 2024. 3 +[10] Hao Chen, Yujin Han, Fangyi Chen, Xiang Li, Yidong Wang, Jindong Wang, Ze Wang, Zicheng Liu, Difan Zou, and Bhiksha Raj. Masked autoencoders are effective tokenizers for diffusion models. arXiv preprint arXiv:2502.03444, 2025. 3 +[11] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pre + +training from pixels. In International conference on machine learning, pages 1691-1703. PMLR, 2020. 3 +[12] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 1 +[13] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 4, 5 +[14] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1 +[15] Patrick Esser, Robin Rombach, and Bjorn Ommer. 
Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 1, 2, 4, 5, 7 +[16] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 8 +[17] Christopher Fifty, Ronald G Junkins, Dennis Duan, Aniketh Iger, Jerry W Liu, Ehsan Amid, Sebastian Thrun, and Christopher Ré. Restructuring vector quantization with the rotation trick. arXiv preprint arXiv:2410.06424, 2024. 2 +[18] Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. 3 +[19] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1 +[20] Alexander Hagiéle, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 1 +[21] Philippe Hansen-Estruch, David Yan, Ching-Yao Chung, Orr Zohar, Jialiang Wang, Tingbo Hou, Tao Xu, Sriram Vishwanath, Peter Vajda, and Xinlei Chen. Learnings from scaling visual tokenizers for reconstruction and generation. arXiv preprint arXiv:2501.09755, 2025. 1, 4 +[22] Shaozhe Hao, Xuantong Liu, Xianbiao Qi, Shihao Zhao, Bojia Zi, Rong Xiao, Kai Han, and Kwan-Yee K Wong. Bigrr: Harnessing binary latent codes for image generation and improved visual representation capabilities. arXiv preprint arXiv:2410.14672, 2024. 7 +[23] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2, 3 +[24] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilib- + +rium. Advances in neural information processing systems, 30, 2017. 3 +[25] Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395, 2024. 1 +[26] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1125-1134, 2017. 5 +[27] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 5 +[28] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2, 4 +[29] Diederik P Kingma, Max Welling, et al. An introduction to variational autoencoders. Foundations and Trends® in Machine Learning, 12(4):307-392, 2019. 4 +[30] Dan Kondratyuk, Lijun Yu, Xiuye Gu, Jose Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Joshua V. Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation. 
In Proceedings of the 41st International Conference on Machine Learning, 2024. 1 +[31] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 1 +[32] Yann LeCun, Yoshua Bengio, et al. Convolutional networks for images, speech, and time series. The handbook of brain theory and neural networks, 3361(10):1995, 1995. 4 +[33] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11523-11532, 2022. 2, 7 +[34] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730-19742. PMLR, 2023. 4, 5, 1 +[35] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024. 7 +[36] Xiang Li, Kai Qiu, Hao Chen, Jason Kuen, Jiquiang Gu, Bhiksha Raj, and Zhe Lin. Imagefolder: Autoregres + +sive image generation with folded tokens. arXiv preprint arXiv:2410.01756, 2024. 2, 3, 7, 4 +[37] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 1 +[38] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 2 +[39] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 
2 +[40] Zhuoyan Luo, Fengyuan Shi, Yixiao Ge, Yujiu Yang, Limin Wang, and Ying Shan. Open-magvit2: An open-source project toward democratizing auto-regressive visual generation. arXiv preprint arXiv:2409.04410, 2024. 2, 4, 7 +[41] Chuofan Ma, Yi Jiang, Junfeng Wu, Jihan Yang, Xin Yu, Zehuan Yuan, Bingyue Peng, and Xiaojuan Qi. Unitok: A unified tokenizer for visual generation and understanding. arXiv preprint arXiv:2502.20321, 2025. 3 +[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 7 +[43] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 2, 4, 8, 3 +[44] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 7, 1 +[45] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 1 +[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 2, 8 +[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 7, 1 +[48] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 3 + +[49] Fengyuan Shi, Zhuoyan Luo, Yixiao Ge, Yujiu Yang, Ying Shan, and Limin Wang. Taming scalable visual tokenizer for autoregressive image generation. arXiv preprint arXiv:2412.02692, 2024. 2, 7 +[50] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. 2024. 1, 2, 3, 4, 5, 7 +[51] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1 +[52] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 1 +[53] Keyu Tian, Yi Jiang, Zehuan Yuan, BINGYUE PENG, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 2, 7, 1, 3 +[54] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 
1, 3 +[55] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 1 +[56] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2 +[57] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 4 +[58] Hanyu Wang, Saksham Suri, Yixuan Ren, Hao Chen, and Abhinav Shrivastava. Larp: Tokenizing videos with a learned autoregressive generative prior. In ICLR, 2025. 2 +[59] Luting Wang, Yang Zhao, Zijian Zhang, Jiashi Feng, Si Liu, and Bingyi Kang. Image understanding makes for a good tokenizer for image generation. arXiv preprint arXiv:2411.04406, 2024. 3 +[60] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2 +[61] Mark Weber, Lijun Yu, Qihang Yu, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. Maskbit: Embedding-free image generation via bit tokens. arXiv preprint arXiv:2409.16211, 2024. 2 +[62] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024. 1 + +[63] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 3 +[64] Wanghan Xu, Xiaoyu Yue, Zidong Wang, Yao Teng, Wenlong Zhang, Xihui Liu, Luping Zhou, Wanli Ouyang, and Lei Bai. 
Exploring representation-aligned latent space for better generation. arXiv preprint arXiv:2502.00359, 2025. +[65] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 4 +[66] Jiahui Yu, Xin Li, Jing Yu Koh, Han Zhang, Ruoming Pang, James Qin, Alexander Ku, Yuanzhong Xu, Jason Baldridge, and Yonghui Wu. Vector-quantized image modeling with improved vqgan. arXiv preprint arXiv:2110.04627, 2021. 1, 2, 5, 7 +[67] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10459-10469, 2023. 5, 1 +[68] Lijun Yu, Yong Cheng, Zhiruo Wang, Vivek Kumar, Wolfgang Macherey, Yanping Huang, David Ross, Irfan Essa, Yonatan Bisk, Ming-Hsuan Yang, et al. Spae: Semantic pyramid autoencoder for multimodal generation with frozen llms. Advances in Neural Information Processing Systems, 36:52692-52704, 2023. 3 +[69] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 1, 2, 4, 5 +[70] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024. 2, 7, 3 +[71] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 3, 7, 8, 2 +[72] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 2, 8 +[73] Baoquan Zhang, Huaibin Wang, Chuyao Luo, Xutao Li, Guotao Liang, Yunming Ye, Xiaochen Qi, and Yao He. Codebook transfer with part-of-speech for vector-quantized image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7757–7766, 2024. 3 +[74] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5 + +[75] Yue Zhao, Yuanjun Xiong, and Philipp Krajhenbuhl. Image and video tokenization with binary spherical quantization. arXiv preprint arXiv:2406.07548, 2024. 2 +[76] Lei Zhu, Fangyun Wei, Yanye Lu, and Dong Chen. Scaling the codebook size of vqgan to 100,000 with a utilization rate of $99\%$ . arXiv preprint arXiv:2406.11837, 2024. 2, 3, 4 +[77] Yongxin Zhu, Bocheng Li, Hang Zhang, Xin Li, Linli Xu, and Lidong Bing. Stabilize the latent space for image autoregressive modeling: A unified perspective. arXiv preprint arXiv:2410.12490, 2024. 3 + +# GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation + +Supplementary Material + +# A. Limitations and Future Work + +This study primarily focuses on scaling tokenizers for class-conditional image generation. While we have demonstrated the effectiveness of GigaTok for downstream class-conditional generation, expanding the scope to include text-conditional image generation or video generation remains an open avenue for future work. Additionally, unlike CNN-based 2D tokenizers, 1D Transformer-based tokenizers are not directly applicable to multiple resolutions without additional training adjustments. This challenge presents an important direction for further exploration. 
Besides scaling the model sizes of tokenizers, the effect of scaling training data, codebook dimension and codebook size for downstream autoregressive generation are left for future research. + +# B. Configurations for AR models + +
SizeParams.BlocksHeadsDim.
B111M1212768
L343M24161024
XL775M36201280
XXL1.4B48241536
Table 8. Architectures of the LlamaGen models in our experiments. + +AR model training. We scale up the training of downstream Llama-style [50, 54] AR models to compare generation performance with other models. For model training, we use WSD learning rate scheduler [20, 25] with $1 \times 10^{-4}$ base learning rate, 0.2 decay ratio and 1 epoch warm-up. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We use a batch size of 256 for training the B, L and XL models and a 512 batch size for training the XXL model. Our AR models are trained for 300 epochs on the $256 \times 256$ ImageNet training set. + +CFG for gFID. Since gFID of GPT models can be largely affected by classifier-free guidance (CFG) [47, 50] and often has an optimal CFG [50], for fair comparison, we search the optimal CFG using zero-order search with a step of 0.25 and use the lowest gFID as the final value. For AR Probing, we use constant CFG scheduling for simplicity. For system-level comparison, we use a step function for CFG scheduling inspired by [31]. Specifically, the AR models predict the first $18\%$ tokens without CFG, i.e., $\mathrm{CFG} = 1$ for better diversity, and use CFG for the remaining tokens + +![](images/0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg) +Figure 10. The architecture of GigaTok with Q-Former. + +![](images/55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg) +Figure 11. Initialization of 1D queries in Q-Former modules. + +for better visual quality. Interestingly, we find that the 1.4B LlamaGen model achieves the best gFID without CFG. + +# C. Detailed GigaTok Implementation + +Please refer to Tab. 9 for training details. + +Q-Former in GigaTok. GigaTok utilizes Q-Former [6, 34] to build 1D tokenizers, as shown in Fig. 10. 
For Q-Former encoder in GigaTok, we initialize the 1D queries from the 2D input features of the CNN encoder using a multi-level average pooling strategy, as shown in Fig. 11. Specifically, for the same 2D input features, we spatially divide them with different granularity at different levels, and perform average pooling for every divided region at each level. The pooled features are flattened and concatenated from level 0 to the last level. Therefore, a 1D token sequence with $2^{L}$ length can be initialized with $L$ levels from 2D input features. At the decoding stage, the 2D queries are all initialized from the first 1D latent feature. + +Entropy Loss for VQ Tokenizers. While entropy loss [67, + +
ConfigurationS-SS-BS-LB-LXL-XXL
Q-Former Encoder depth6661236
Q-Former Encoder heads8881220
Q-Former Encoder dim.5125125127681280
Q-Former Decoder depth612242448
Q-Former Decoder heads.812161624
Q-Former Decoder dim.512768102410241536
Params (M)1362325336222896
Codebook size16384
Codebook dimension8
#Tokens256
Training epochs100200200200300
Batch size128128256256256
Alignment Layer l3
Learning rate scheduleCosine Decay
Base learning rate\( 1 \times 10^{-4} \)
Minimum learning rate\( 1 \times 10^{-5} \)
LR warm-up iterations00005000
OptimizerAdamW[39]
Opt. momentum\( \beta_1 = 0.9, \beta_2 = 0.95 \)
Entropy Loss weight0000\( 5 \times 10^{-3} \)
+ +Table 9. GigaTok configuration and default training details + +[69] is discussed for LFQ [69], its application to VQ tokenizers is less commonly explained. We provide a detailed derivation of the entropy loss specifically for VQ tokenizers. Mathematically, for quantization process from continuous vector $\mathbf{z} \in \mathbb{R}^D$ to quantized vector $\hat{\mathbf{z}} = \mathbf{c}_i \in \mathbb{R}^D$ where $\mathbf{c}_i$ is the $i$ -th codebook vector from codebook $\mathbf{C} \in \mathbb{R}^{N \times D}$ , we assume this process is statistical and follows the following distribution: + +$$ +p (\hat {\mathbf {z}} = \mathbf {c} _ {i} | \mathbf {z}) \triangleq \operatorname {s o f t m a x} (- l _ {2} (\mathbf {z}, \mathbf {C})) [ i ] \tag {4} +$$ + +where $l_{2}(\mathbf{z},\mathbf{C})\in \mathbb{R}^{N}$ is the $L_{2}$ distance between $\mathbf{z}$ and all the codebook vectors. Then, minimization of the quantization error can be partially achieved by minimizing the expectation of entropy $\mathbb{E}_{\mathbf{z}}[H(\hat{\mathbf{z}} |\mathbf{z})]$ , which can be understood as maximizing the prediction confidence for $p(\hat{\mathbf{z}} |\mathbf{z})$ . To encourage higher codebook utilization, we aim to make the average appearance probability of codebook vectors more uniform. This is achieved by maximizing the entropy $H(\hat{\mathbf{z}})$ . Therefore, the optimization of the two entropy terms leads to the final entropy loss equation: + +$$ +\mathcal {L} _ {\text {e n t r o p y}} = \mathbb {E} _ {\mathbf {z}} [ H (\hat {\mathbf {z}} | \mathbf {z}) ] - H (\hat {\mathbf {z}}) \tag {5} +$$ + +In practice, to calculate $H(\hat{\mathbf{z}})$ , we estimate $p(\hat{\mathbf{z}} = \mathbf{c}_i)$ by $p(\hat{\mathbf{z}} = \mathbf{c}_i) = \mathbb{E}_{\mathbf{z}}[p(\hat{\mathbf{z}} = \mathbf{c}_i|\mathbf{z})]$ . Note that entropy loss is not our contribution. We only provide a detailed definition of entropy loss in VQ scenarios for better understanding. 
+ +Additional implementation details. To stabilize the training of our tokenizer with a hybrid architecture, we initially use a shortcut feature reconstruction trick at the first 15k iterations of the tokenizer training. But we later found that this trick can be replaced with a simple 1-epoch learning rate warmup combined with entropy loss [15, 69]. Specifically for this trick, we additionally give the output feature of the CNN encoder to the CNN decoder directly to be trained for reconstruction, and also align the output feature of the Transformer decoder to the output feature of the CNN encoder, besides the original training objectives. Note that this strategy is complex and can even hinder performance for XL-XXL tokenizers. We recommend using the learning rate warmup combined with entropy loss [15, 69] instead, for both XL-XXL tokenizer and the smaller ones. Additionally, we utilize the rotation trick [17] for all tokenizers, though we observe its effect on performance to be limited for our tokenizer. The implementation of the semantic regularization is partially inspired by REPA [71]. + +# D. Full Evaluation Results and Analysis + +Here we present the full evaluation results for the tokenizers and downstream AR models, as summarized in Tab. 10. We observe that scaling up visual tokenizers consistently improves the reconstruction quality across multiple metrics. Interestingly, for the 1.4B AR model, the lowest gFID is obtained without applying any CFG. This phenomenon is + +
TokenizerParam. rFID↓LPIPS↓PSNR↑SSIM↑AR ModelParam. gFID↓Acc.↑IS↑Precision↑Recall↑
LlamaGen-Tok. [50]72M2.19-20.790.675LlamaGen-B [50]111M5.46-193.610.830.45
GigaTok-S-S136M1.010.222620.740.670LlamaGen-B (1d) [50]111M4.0562.6240.610.810.51
GigaTok-S-B232M0.890.212120.930.677LlamaGen-B (1d) [50]111M3.8362.9233.310.830.51
GigaTok-B-L622M0.810.205921.210.685LlamaGen-B (1d) [50]111M3.2667.6221.020.810.56
LlamaGen-XXL (1d) [50]1.4B2.03*69.4238.520.800.63
GigaTok-B-L622M0.51‡0.20621.320.691LlamaGen-B (1d) [50]111M3.3367.7265.430.800.56
GigaTok-XL-XXL2.9B0.790.194721.650.699LlamaGen-B (1d) [50]111M3.1572.0224.280.820.55
LlamaGen-XXL (1d) [50]1.4B1.98*74.0256.760.810.62
+ +Table 10. Full results for our tokenizers and AR models on ImageNet ${256} \times {256}$ . For gFID,we present the lowest value between w/ or w/o CFG scenarios. $\ddagger$ : Using frozen DINO [7] for discriminator,which largely improves rFID. $\star$ : Without classifier-free-guidance. + +also observed in the concurrent work FlexTok [3], despite significant differences between GigaTok and FlexTok. We hypothesize that semantic regularization might be the primary contributing factor for this phenomenon. + +Discussion on Scaling and Enhancing the Discriminator. Recently, VAR [53], ImageFolder [36], and the concurrent work UniTok [41] have begun leveraging DINO-based discriminators [7, 43] to enhance tokenizer training, achieving impressive improvements in rFID scores. We have also experimented with the same DINO discriminator configuration as VAR. Our results indicate that although rFID scores improve, the downstream generation quality improvements are less significant, as detailed in Tab. 10. Furthermore, when applying the DINO discriminator to XL-XXL tokenizers, we observed that adversarial training frequently encounters instability. Specifically, a strong discriminator quickly learns to distinguish reconstructed samples, diminishing the benefits of adversarial training and leading to blurry artifacts. We leave further exploration of discriminator scaling and enhancement strategies for future work. + +# E. Training Tokenizers for More Iterations + +While we largely resolve the reconstruction vs. generation dilemma regarding tokenizer model scaling, this challenge persists for tokenizer training duration scaling. To illustrate this phenomenon, we train five S-S tokenizers ranging from 40 to 120 epochs using a cosine learning rate scheduler, as detailed in Tab. 9. The results are presented in Fig. 12. + +When extending tokenizer training iterations, reconstruction quality consistently improves. 
However, downstream generation quality initially improves but subsequently degrades with further increases in tokenizer training duration. Additionally, the validation loss of AR probing continuously rises with longer tokenizer training, regardless of semantic regularization. This trend suggests an increasing complexity in the tokenizer's latent space as the training duration extends. + +We hypothesize that data scaling may alleviate this issue, and leave it for future exploration. In practice, allocating computational resources toward model scaling rather than extended training duration may yield better tokenizer performance. + +# F. Linear Probing Accuracy of Tokenizers + +We show that the linear probing accuracy of the tokenizer encoders may not necessarily indicate the performance of downstream AR models. We utilize the intermediate checkpoints during the training of B-L and XL-XXL tokenizers for evaluation. As shown in Fig. 13, the XL-XXL tokenizer encoder presents an overfitting trend in terms of tokenizer encoder linear probing accuracy. However, this overfitting trend is not reflected in AR Probing linear probing accuracy or gFID. Therefore, the linear probing accuracy of the tokenizer encoders may not be a good indicator of downstream model performance. Similarly, a concurrent work UniTok [41], also points out that the performance of the tokenizer encoder in terms of zero-shot ImageNet classification accuracy may not necessarily reflect the visual understanding ability of downstream LLMs trained on the tokenizer. + +The abnormality for large tokenizers reveals that the linear probing accuracy of the tokenizer is not necessarily a good indicator for downstream generation models. Since we care more about the representation learning for downstream models than for the tokenizers, using AR Probing as a direct evaluating method is better than indirect tokenizer linear probing accuracy. + +# G. 
More Discussions About Related Work + +TiTok [70] explores the use of 1D Transformer-based tokenizers under a high compression rate setting. TiTok seminally explores the model scaling of visual tokenizers and uses larger tokenizers for higher compression rate. However, the reconstruction vs. generation dilemma for scaling tokenizers is not solved in TiTok. As a result, the best generation model in TiTok is still trained on its smallest tokenizer variant. + +![](images/e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg) +Figure 12. Training duration scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis. + +![](images/b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg) + +![](images/a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg) + +![](images/501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg) + +![](images/31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg) + +![](images/5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg) + +![](images/2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg) +Figure 13. The linear probing accuracy of tokenizer encoders does not necessarily reflect downstream model performance. As the training proceeds, the XL-XXL tokenizer encoder presents an overfitting trend measured by linear probing accuracy, but downstream model performances consistently improve. + +![](images/3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg) + +![](images/c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg) + +ViTok [21] is a concurrent work which has explored the effect of model scaling for VAE [28]. ViTok evaluates its VAE models in terms of both reconstruction and downstream diffusion generation performance. 
While having a very different setting from GigaTok, ViTok similarly finds that asymmetric design is better for VAEs. While ViTok suggests that small encoders are optimal, we point out that in our setting scaling encoders is also beneficial. Notably, the reconstruction vs. generation dilemma for scaling visual tokenizers is not solved in ViTok. We hypothesize that adding semantic regularization may similarly help solve the tokenizer scaling dilemma for VAEs, but leave it for future study. + +MAGVIT-v2 [69] introduces LFQ to enhance discrete tokenizers. It also introduces the entropy penalty for tokenizer training, which is shown to be important for training large-scale tokenizers in our work. Instead of tokenizer model scaling, MAGVIT-v2 focuses more on scaling the codebook size of tokenizers. While codebook dimension and codebook size are important bottlenecks for visual tokenizers, we point out that model size scaling is also an important way for improving visual tokenizers. + +ImageFolder [36] utilizes two branches for image encoding to handle high-level semantic information and low-level + +visual details respectively. It seminally utilizes semantic alignment to enhance the learned representation of tokenizers. + +VA-VAE [65] tames the reconstruction vs. generation dilemma in increasing latent dimensions for continuous VAE [28, 29]. VA-VAE improves the reconstruction-generation Pareto Frontier by introducing vision foundation model alignment loss. In contrast, we seek continuous improvements in both reconstruction and generation by scaling tokenizers. Semantic regularization serves different purposes in the two works. 
\ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08736/images/0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg b/data/2025/2504_08xxx/2504.08736/images/0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94464af589d7d99e09940a03620be7c41c71746b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4e7c1925091cb55caa401cd6748dc751f30010b21f2a6fb0afa37f7e76cdbfd +size 40864 diff --git a/data/2025/2504_08xxx/2504.08736/images/0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg b/data/2025/2504_08xxx/2504.08736/images/0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..029d26fcba193bda7bb95f42a1bd75934f43f319 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17e8c1ee05ada8e8cfee3aca1a91e3e4ca30834e938a64a8996dae80a8b2486e +size 44328 diff --git a/data/2025/2504_08xxx/2504.08736/images/18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg b/data/2025/2504_08xxx/2504.08736/images/18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg new file mode 100644 index 0000000000000000000000000000000000000000..383bb82967554c0dde45ea3825d8e7ac0dfc8efc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9289c64e500d32f2237f0461c8b4ade59ef7fbf3075e540ddbaca6d04b108f5c +size 11072 diff --git 
a/data/2025/2504_08xxx/2504.08736/images/2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg b/data/2025/2504_08xxx/2504.08736/images/2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38c4869fffe7177da30aa988f7c2ef9406358da6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7293e8f1f018cdd691a0d359553f2156fbf89277670c83faa1f316f81c3b4b5 +size 11646 diff --git a/data/2025/2504_08xxx/2504.08736/images/2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg b/data/2025/2504_08xxx/2504.08736/images/2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..922ad6a22f687899418c4af573d3e109d827a936 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aea8773d08c030a717fb9ae610d003db73ece3f27c6e9b0f477683b58b541c4 +size 9759 diff --git a/data/2025/2504_08xxx/2504.08736/images/274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg b/data/2025/2504_08xxx/2504.08736/images/274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69451656d87dc1bb35c667d1c06b6baf67f15f67 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f53e9066bceac4d86abba0d4b63dd0c0933623838b64b7fcc37a05a499d45f8 +size 5518 diff --git a/data/2025/2504_08xxx/2504.08736/images/2a947eeb75e4a83e727c41d1f9c1ee05719590673ed1bddb84605054f0989839.jpg 
b/data/2025/2504_08xxx/2504.08736/images/2a947eeb75e4a83e727c41d1f9c1ee05719590673ed1bddb84605054f0989839.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c200be427b6677fba1541ebbac7f6f89305761ae --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/2a947eeb75e4a83e727c41d1f9c1ee05719590673ed1bddb84605054f0989839.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3473c62a8b3ede775b181330e51c7390e06559ea6a24f13a3611d077949e45e5 +size 26429 diff --git a/data/2025/2504_08xxx/2504.08736/images/31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg b/data/2025/2504_08xxx/2504.08736/images/31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..585f2079a30ebf620db09ac4f690650f98d5ece6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:816cd8e27878448100ffcafdb56e0b4a944fd94e3a41b32a8d80b43aff2855a2 +size 15571 diff --git a/data/2025/2504_08xxx/2504.08736/images/353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg b/data/2025/2504_08xxx/2504.08736/images/353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4282093cb1440ec22be6047a6e8540a80c1953ce --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4014d2a08fab3def94e91e0d7be1b6e1bf36c5a0a2c42993ac908c7ea63b76b4 +size 10521 diff --git a/data/2025/2504_08xxx/2504.08736/images/368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg b/data/2025/2504_08xxx/2504.08736/images/368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d8d48283b811eadb1d64ef3a546d2cdeb73bbece --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:725555f7dc33e3ae5bbf286a9b9ac40743855b306fcb2487c6be081c84439746 +size 16551 diff --git a/data/2025/2504_08xxx/2504.08736/images/3d5da0aa1a4a8a7f9e6fd7c5016ffb83c94f6b86859864d66000d82cdf735b24.jpg b/data/2025/2504_08xxx/2504.08736/images/3d5da0aa1a4a8a7f9e6fd7c5016ffb83c94f6b86859864d66000d82cdf735b24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0348472a7631c1a18e7de26d81958ed487a5e964 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/3d5da0aa1a4a8a7f9e6fd7c5016ffb83c94f6b86859864d66000d82cdf735b24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b291610396c38d4966320e6d8e2859c772e318e2ed0b21d5b8b9c2fedefaca65 +size 6686 diff --git a/data/2025/2504_08xxx/2504.08736/images/3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg b/data/2025/2504_08xxx/2504.08736/images/3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da1969ad6a4529c1f9d8c6afa2b580eda98aabe1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd89e4938945c4b2701586f4e718cdbc5f00b43ffc65a001436f936321a547a3 +size 9746 diff --git a/data/2025/2504_08xxx/2504.08736/images/407127a1c9516fe4df42fdca4780af32a20fdb67d771d49da601eecdcc3f5880.jpg b/data/2025/2504_08xxx/2504.08736/images/407127a1c9516fe4df42fdca4780af32a20fdb67d771d49da601eecdcc3f5880.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a24b214a7482dcc49133150bbc00d6989688ee0d --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08736/images/407127a1c9516fe4df42fdca4780af32a20fdb67d771d49da601eecdcc3f5880.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd5fbf845136c0894f0b8240dae437b3880c103e1f5bed4517bd520450b6314c +size 249439 diff --git a/data/2025/2504_08xxx/2504.08736/images/45da4b40e08de744a3a50b51d9ab95bb8b8df14ed43b4deffc897ea08a8800ef.jpg b/data/2025/2504_08xxx/2504.08736/images/45da4b40e08de744a3a50b51d9ab95bb8b8df14ed43b4deffc897ea08a8800ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7dfebdbf20a49e8aaf41bdec2bb30d4fd14eae7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/45da4b40e08de744a3a50b51d9ab95bb8b8df14ed43b4deffc897ea08a8800ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8594c1c326ad3a1b5cd7529a4739b07e6e4c4824524be4ad75cec5e117969c4 +size 4617 diff --git a/data/2025/2504_08xxx/2504.08736/images/4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg b/data/2025/2504_08xxx/2504.08736/images/4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28e4f2509f0ecbfd45a46f7f06a75da1608a24d6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e74746956bc795ff0b1d5130e25528d1ea353bbb926e9eec3b9b62846f5a3a5f +size 4500 diff --git a/data/2025/2504_08xxx/2504.08736/images/4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg b/data/2025/2504_08xxx/2504.08736/images/4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16963769cdd07aa533bfaceea0789580a379d368 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d30cd9effc155c7b2ecc2c7c95a2e38945be6e86733470bef999e0ded8468df1 +size 126913 diff --git a/data/2025/2504_08xxx/2504.08736/images/4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg b/data/2025/2504_08xxx/2504.08736/images/4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb2343e39187337ab497eb2ce32ca7ac53cf822e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:facff33ab9ddddbca63670d0d7f424236d4ade41040bcd146a229bcf7686d73d +size 14403 diff --git a/data/2025/2504_08xxx/2504.08736/images/501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg b/data/2025/2504_08xxx/2504.08736/images/501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53558e49cf9ed0e6cbeb9a5ef9a187b2705c9974 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c84b813b24ff2211bf019c50883ab0430b846b22bad2dcfae9201ad1104dd491 +size 16215 diff --git a/data/2025/2504_08xxx/2504.08736/images/55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg b/data/2025/2504_08xxx/2504.08736/images/55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7781b91753933b4d79c523bf554d65dd40c9ffd --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3257b4d88b7f7537eecbdc9e446d644533d4b28b8c87df2bbe1430bfc263205a +size 26439 diff --git 
a/data/2025/2504_08xxx/2504.08736/images/5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg b/data/2025/2504_08xxx/2504.08736/images/5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9212b3f134ced8bd4a07f951a2caedd6ff92c2f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f63e5436f7c5992e763626c152b20f3e5091b5987d7ff45760fd8abe16cb758d +size 2457 diff --git a/data/2025/2504_08xxx/2504.08736/images/67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg b/data/2025/2504_08xxx/2504.08736/images/67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b3893d402dc0a7686f8e777ac878cbeaced6142 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dcf83d2c0d78cb6faeb682ebbbaeaceaa3058d46a1af1a2f1c533b76050ea33 +size 11156 diff --git a/data/2025/2504_08xxx/2504.08736/images/6bae227554a03c1775492f7d81adf5e2f053dba39d655da7c38297c685182938.jpg b/data/2025/2504_08xxx/2504.08736/images/6bae227554a03c1775492f7d81adf5e2f053dba39d655da7c38297c685182938.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d82883807f34e689a272faa33418a77b38a7d709 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/6bae227554a03c1775492f7d81adf5e2f053dba39d655da7c38297c685182938.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bd2c7cff182999b478e33d2044d54a882b1d61aff25079a6cdc23fc73d6784f +size 15969 diff --git a/data/2025/2504_08xxx/2504.08736/images/6d6262b791a8ccca0ad91111cb5a0b1313980e427f8b7c97c20147297de64888.jpg 
b/data/2025/2504_08xxx/2504.08736/images/6d6262b791a8ccca0ad91111cb5a0b1313980e427f8b7c97c20147297de64888.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3df0453329fe0909c423a0e93278a0dc42e96c12 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/6d6262b791a8ccca0ad91111cb5a0b1313980e427f8b7c97c20147297de64888.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37b4db1cd9f3475151b7a05f9933d4c2703e5703d8e4386ab49d118c18661caf +size 23549 diff --git a/data/2025/2504_08xxx/2504.08736/images/700aba12e34d4e0774e33b78bc1075413f3a17d8a9652afd2b5f318082fa9283.jpg b/data/2025/2504_08xxx/2504.08736/images/700aba12e34d4e0774e33b78bc1075413f3a17d8a9652afd2b5f318082fa9283.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35b872430b6f009153e68ddd60b1260f3c8cd355 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/700aba12e34d4e0774e33b78bc1075413f3a17d8a9652afd2b5f318082fa9283.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70b73866ed6c02239afbf23f9f82497d0f2540d8563038227dfdc1f4131adec3 +size 3382 diff --git a/data/2025/2504_08xxx/2504.08736/images/7679cbf817b12edaa14706b6831569f0b7f4575518cabf088803fec2247636ff.jpg b/data/2025/2504_08xxx/2504.08736/images/7679cbf817b12edaa14706b6831569f0b7f4575518cabf088803fec2247636ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1a941d3a37880c706d80249d16f5a4125482f84 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/7679cbf817b12edaa14706b6831569f0b7f4575518cabf088803fec2247636ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a8272daa03287775b25cb6c3e568b52d801596240af650607d56664875f3053 +size 29166 diff --git a/data/2025/2504_08xxx/2504.08736/images/77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg b/data/2025/2504_08xxx/2504.08736/images/77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..1410417dfbcca548ac0e504fcc4b3970e4031993 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e48b9e9b5cddf958c053bac4fa6e2db8656ea9574b1eba8b6c4d68de1d206f +size 4882 diff --git a/data/2025/2504_08xxx/2504.08736/images/79b53a77bb3a6bf0032d29e80aaf661efdeae855aaf6a8344064a59663aa038f.jpg b/data/2025/2504_08xxx/2504.08736/images/79b53a77bb3a6bf0032d29e80aaf661efdeae855aaf6a8344064a59663aa038f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be1130869d0ef3eafacd0593ef57db0ad760ccaa --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/79b53a77bb3a6bf0032d29e80aaf661efdeae855aaf6a8344064a59663aa038f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5629bbb4fcb32a904656b11c30f60e6c01c58f6084b24681046507c16f77a23 +size 30088 diff --git a/data/2025/2504_08xxx/2504.08736/images/86e9797e3f6e778b4e27c04bb9c93e8c669942a1dfc25453926910afa82086d5.jpg b/data/2025/2504_08xxx/2504.08736/images/86e9797e3f6e778b4e27c04bb9c93e8c669942a1dfc25453926910afa82086d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa9d2bc03bb059de71df9c97e4cff05962a9e91f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/86e9797e3f6e778b4e27c04bb9c93e8c669942a1dfc25453926910afa82086d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a245c838fb0b706f5bee98816ff59a04bcfce82a5c29dbbe3166bf9b7609d6e +size 19097 diff --git a/data/2025/2504_08xxx/2504.08736/images/87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg b/data/2025/2504_08xxx/2504.08736/images/87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2829c542a6ab0da2219af0f1207ea45ec892d65c --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08736/images/87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe726acad7ffa89cb49fe7542107110027e7e670e25eddfa8c7c6fc81c367b67 +size 5105 diff --git a/data/2025/2504_08xxx/2504.08736/images/957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg b/data/2025/2504_08xxx/2504.08736/images/957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8eedb751f7fc718be9e036c8f891c5dc80b84198 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb0550cc5db03b82ec87f9fa6a1de37231edd46bd954c2f11c5fb54d08a61bd0 +size 7117 diff --git a/data/2025/2504_08xxx/2504.08736/images/9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg b/data/2025/2504_08xxx/2504.08736/images/9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4aa3dc2eaa2246e1cb3950b16fc1f9b3322a54b3 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:124f038a014bc67f73a2857dc6c73aa284ced4888f67d48f63f948f9ab2fcc8c +size 9069 diff --git a/data/2025/2504_08xxx/2504.08736/images/9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg b/data/2025/2504_08xxx/2504.08736/images/9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f09976e42333fd906fbc07c32d9c25e2263dfba8 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:58628b64c3cf9afedd89a384fba1d60a4ace2d7ec721df239ab5419035515d3a +size 11026 diff --git a/data/2025/2504_08xxx/2504.08736/images/9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg b/data/2025/2504_08xxx/2504.08736/images/9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ba40b6f9f19d251110178b7610aaa5c6ff69798 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51bc1b2126e78cffa630cea85dc15d0033a3ae2c5cb90b45f77921dbeab11827 +size 8398 diff --git a/data/2025/2504_08xxx/2504.08736/images/a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg b/data/2025/2504_08xxx/2504.08736/images/a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8de147ab472f3df60b102422e5c522cbe8b5623 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0038d3c5859ea4dd79b536f865e4f5975c8343548b336a53138096ccc9730bf +size 13278 diff --git a/data/2025/2504_08xxx/2504.08736/images/a2be56996275f976cf1f457e4c8dd1af6d6f1e9a7b132e726a12ef2c4f015402.jpg b/data/2025/2504_08xxx/2504.08736/images/a2be56996275f976cf1f457e4c8dd1af6d6f1e9a7b132e726a12ef2c4f015402.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66e8f08f61dd9d74810a4b59d52798e3d399f56b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/a2be56996275f976cf1f457e4c8dd1af6d6f1e9a7b132e726a12ef2c4f015402.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02e9691174201b3c83f9c01883cc9b6dbc5c5137f15ebea86e2f4b0c5dd78abe +size 98454 diff --git 
a/data/2025/2504_08xxx/2504.08736/images/a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg b/data/2025/2504_08xxx/2504.08736/images/a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9441273a40b9dfc2f5ff6a678b9724273b5eab6e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f32a228a1f83f2d5a9ce015648559931c1d152b0c752067485696351c4655bc5 +size 19162 diff --git a/data/2025/2504_08xxx/2504.08736/images/aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg b/data/2025/2504_08xxx/2504.08736/images/aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94075e0dc2309453db19799dc82172ce68ca2918 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ae7322959c32e58a0df204f0a8318cf02e09250891b93d7eba6399927b07fc3 +size 14211 diff --git a/data/2025/2504_08xxx/2504.08736/images/b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg b/data/2025/2504_08xxx/2504.08736/images/b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..957748affa2eeed566a76ec51e36e20aae31e29f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c58d8b2e0ca439f6e26b391f9d1d94d3a8f4ea6073d65e5d5b6ff877b24c173 +size 12637 diff --git a/data/2025/2504_08xxx/2504.08736/images/b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg 
b/data/2025/2504_08xxx/2504.08736/images/b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44fa0eba8ab701e8fae508409234b8addb311d60 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a623cf4df4470ecc599406dd6f507907bfcc92a0cfdc7408048ca3ed00c1891 +size 8387 diff --git a/data/2025/2504_08xxx/2504.08736/images/bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg b/data/2025/2504_08xxx/2504.08736/images/bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7103ab11f75bececa6a62e8359330e20c1f5dfad --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7895a14df4682eedc2bce83729b8b385ff88040ce12c1bdff5709b730074ae65 +size 3879 diff --git a/data/2025/2504_08xxx/2504.08736/images/bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg b/data/2025/2504_08xxx/2504.08736/images/bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa8450a7efc5a449f003bc9eafe8bc74852423c4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acad0799b356810f6de593995e33fbd68704893a31fa4a19e5218de08d83d322 +size 27124 diff --git a/data/2025/2504_08xxx/2504.08736/images/c26fc826026f4d0cbd93db0771b9f0580709032d81e1bda4477e2c5a153495cd.jpg b/data/2025/2504_08xxx/2504.08736/images/c26fc826026f4d0cbd93db0771b9f0580709032d81e1bda4477e2c5a153495cd.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..666577c70198ee78b24ddfdc6ee8d6513431e4e3 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/c26fc826026f4d0cbd93db0771b9f0580709032d81e1bda4477e2c5a153495cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92708665698662e472b47d348fdfcdb2b967a2816afab0fe9a1ba722ad005ac3 +size 4604 diff --git a/data/2025/2504_08xxx/2504.08736/images/c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg b/data/2025/2504_08xxx/2504.08736/images/c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b7e082ce9700fa95dfa9e3bafeef017236617e0 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc76f451df54b90fa2c54e53888cc04d0d626a63ea1492075abbd140582d8fb +size 9546 diff --git a/data/2025/2504_08xxx/2504.08736/images/c430d54c28f1d7deb48c5ced503e66bfc7c3f7e549a1d36977dcd91509812eac.jpg b/data/2025/2504_08xxx/2504.08736/images/c430d54c28f1d7deb48c5ced503e66bfc7c3f7e549a1d36977dcd91509812eac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09b793933bf852495c90817b8c20dd13f7edb69f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/c430d54c28f1d7deb48c5ced503e66bfc7c3f7e549a1d36977dcd91509812eac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ad0a5a8d09a73db76123b0a8d473dadc942697a09b13473fb986bb37f1e439 +size 101264 diff --git a/data/2025/2504_08xxx/2504.08736/images/d13b14d845494dadf9a20a896b770459572d94b3dd032d5d792021b54867189b.jpg b/data/2025/2504_08xxx/2504.08736/images/d13b14d845494dadf9a20a896b770459572d94b3dd032d5d792021b54867189b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..076996e03da515b0c07b62625b06c1625d5c8b57 --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08736/images/d13b14d845494dadf9a20a896b770459572d94b3dd032d5d792021b54867189b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caccfa74b594ca8661cf0ab14864398bdc459a738d1034ce9fe6c51e179c21aa +size 18000 diff --git a/data/2025/2504_08xxx/2504.08736/images/e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg b/data/2025/2504_08xxx/2504.08736/images/e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93aee71fc04deec095f1b86e3bed32ddf41e2e43 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e0f84cefc8e1c46e00ebaa97ebe5e2f18d1494186a0c69f1cb1ba497806a73c +size 11656 diff --git a/data/2025/2504_08xxx/2504.08736/images/e84d8dee75edb0b5e1b5359de086cf50cb99bf3269dd86ed1d8aeaf34ef19742.jpg b/data/2025/2504_08xxx/2504.08736/images/e84d8dee75edb0b5e1b5359de086cf50cb99bf3269dd86ed1d8aeaf34ef19742.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f612643f074bad1c7437e506bffebc45458c35f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/e84d8dee75edb0b5e1b5359de086cf50cb99bf3269dd86ed1d8aeaf34ef19742.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7b881176a216127e1e903b681c9a0f64b3ec168b96c6727b12dd92217853613 +size 5255 diff --git a/data/2025/2504_08xxx/2504.08736/images/ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg b/data/2025/2504_08xxx/2504.08736/images/ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe3afcee5c276ffbea9a283ec90502995dd0bf6a --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:aa691b4cb3e172f937bf51f660889f94642f5efc213bb39224fb49e889c6e66d +size 8922 diff --git a/data/2025/2504_08xxx/2504.08736/images/ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg b/data/2025/2504_08xxx/2504.08736/images/ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8723ea3ebcec85c58f5b032a958d2ef8e32125c1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b76e1349e31ceaca5ccd22fbfa9c76e5e2ac1c3ada09e3189037c668ec1cd57 +size 7317 diff --git a/data/2025/2504_08xxx/2504.08736/images/f4034ae46c2d3aa8bd499318635ef2d35fe3c4609eb808a740ddf39df9fd08b0.jpg b/data/2025/2504_08xxx/2504.08736/images/f4034ae46c2d3aa8bd499318635ef2d35fe3c4609eb808a740ddf39df9fd08b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22596ac04342bc7cdfc353de8c1a24d8293e2773 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/f4034ae46c2d3aa8bd499318635ef2d35fe3c4609eb808a740ddf39df9fd08b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af64325bf12820112d314d14e2b77bf475895f7628aded03b2fddc65b48e5caa +size 24842 diff --git a/data/2025/2504_08xxx/2504.08736/images/f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg b/data/2025/2504_08xxx/2504.08736/images/f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89eb3b6c639a0c4ac99dd6b2ccc837dd508f5ffc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/images/f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdf9d7a834c829c0e614bc943f028a2618c68ae7ddfa9b8d7584ac759b6a9830 +size 10115 diff --git 
a/data/2025/2504_08xxx/2504.08736/layout.json b/data/2025/2504_08xxx/2504.08736/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3a7dea3e2915689d09a5242df7e6a820d95afc4b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08736/layout.json @@ -0,0 +1,11819 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 122, + 103, + 489, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 103, + 489, + 140 + ], + "spans": [ + { + "bbox": [ + 122, + 103, + 489, + 140 + ], + "type": "text", + "content": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 160, + 160, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 160, + 160, + 175 + ], + "spans": [ + { + "bbox": [ + 82, + 160, + 160, + 175 + ], + "type": "text", + "content": "Tianwei Xiong" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 186, + 161, + 260, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 161, + 260, + 175 + ], + "spans": [ + { + "bbox": [ + 186, + 161, + 260, + 175 + ], + "type": "text", + "content": "Jun Hao Liew2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 285, + 161, + 358, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 161, + 358, + 175 + ], + "spans": [ + { + "bbox": [ + 285, + 161, + 358, + 175 + ], + "type": "text", + "content": "Zilong Huang" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 384, + 161, + 444, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 161, + 444, + 175 + ], + "spans": [ + { + "bbox": [ + 384, + 161, + 444, + 175 + ], + "type": "text", + "content": "Jiashi Feng2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 470, + 161, + 527, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 161, + 527, + 175 + ], + "spans": [ + { + "bbox": [ + 
470, + 161, + 527, + 175 + ], + "type": "text", + "content": "Xihui Liu" + }, + { + "bbox": [ + 470, + 161, + 527, + 175 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 168, + 175, + 317, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 175, + 317, + 190 + ], + "spans": [ + { + "bbox": [ + 168, + 175, + 317, + 190 + ], + "type": "text", + "content": "1The University of Hong Kong" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 357, + 175, + 443, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 175, + 443, + 189 + ], + "spans": [ + { + "bbox": [ + 357, + 175, + 443, + 189 + ], + "type": "text", + "content": "2ByteDance Seed" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 182, + 190, + 425, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 190, + 425, + 203 + ], + "spans": [ + { + "bbox": [ + 182, + 190, + 425, + 203 + ], + "type": "text", + "content": "Project page: https://silentview.github.io/GigaTok/" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 152, + 231, + 199, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 231, + 199, + 243 + ], + "spans": [ + { + "bbox": [ + 152, + 231, + 199, + 243 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 256, + 296, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 256, + 296, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 256, + 296, + 568 + ], + "type": "text", + "content": "In autoregressive (AR) image generation, visual tokenizers compress images into compact discrete latent tokens, enabling efficient training of downstream autoregressive models for visual generation via next-token prediction. 
While scaling visual tokenizers improves image reconstruction quality, it often degrades downstream generation quality—a challenge not adequately addressed in existing literature. To address this, we introduce GigaTok, the first approach to simultaneously improve image reconstruction, generation, and representation learning when scaling visual tokenizers. We identify the growing complexity of latent space as the key factor behind the reconstruction vs. generation dilemma. To mitigate this, we propose semantic regularization, which aligns tokenizer features with semantically consistent features from a pre-trained visual encoder. This constraint prevents excessive latent space complexity during scaling, yielding consistent improvements in both reconstruction and downstream autoregressive generation. Building on semantic regularization, we explore three key practices for scaling tokenizers: (1) using 1D tokenizers for better scalability, (2) prioritizing decoder scaling when expanding both encoder and decoder, and (3) employing entropy loss to stabilize training for billion-scale tokenizers. By scaling to 3 billion parameters, GigaTok achieves state-of-the-art performance in reconstruction, downstream AR generation, and downstream AR representation quality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 590, + 135, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 135, + 602 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "type": "text", + "content": "Autoregressive (AR) language models (LM) have emerged as a promising approach for visual generation [15, 50, 66, 69], driven by their proven scalability [2, 5, 14, 19, 37, 51, 52, 54, 55] and the potential for unified multimodal modeling [12, 45, 62]. The AR image generation framework consists of a visual tokenizer and a downstream AR generator. The tokenizer encodes images into discrete tokens, trained" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 318, + 232, + 381, + 290 + ], + "blocks": [ + { + "bbox": [ + 318, + 232, + 381, + 290 + ], + "lines": [ + { + "bbox": [ + 318, + 232, + 381, + 290 + ], + "spans": [ + { + "bbox": [ + 318, + 232, + 381, + 290 + ], + "type": "image", + "image_path": "b705830c6e5c612a3f65b4102bf1f7f236bd0886aafb32e0701e980d97987c13.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 383, + 234, + 425, + 289 + ], + "blocks": [ + { + "bbox": [ + 383, + 234, + 425, + 289 + ], + "lines": [ + { + "bbox": [ + 383, + 234, + 425, + 289 + ], + "spans": [ + { + "bbox": [ + 383, + 234, + 425, + 289 + ], + "type": "image", + "image_path": "274370f0250037eda8667d5c1a5297451a24c1e086f672ad0122c6e9cf19af1a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 428, + 234, + 463, + 289 + ], + "blocks": [ + { + "bbox": [ + 428, + 234, + 463, + 289 + ], + "lines": [ + { + "bbox": [ + 428, + 234, + 463, + 289 + ], + "spans": [ + { + "bbox": [ + 428, + 234, + 463, + 289 + ], + "type": "image", + "image_path": "4a851190f391968fff60efe2758f19f4554b97d7becbd7c5a587a2444a37eb3c.jpg" + } + ] + 
} + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 466, + 234, + 507, + 289 + ], + "blocks": [ + { + "bbox": [ + 466, + 234, + 507, + 289 + ], + "lines": [ + { + "bbox": [ + 466, + 234, + 507, + 289 + ], + "spans": [ + { + "bbox": [ + 466, + 234, + 507, + 289 + ], + "type": "image", + "image_path": "87c6f73875da6f2e2c33fde3ca1f08f894833374e305624ad1c2334895a90f12.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 508, + 234, + 549, + 289 + ], + "blocks": [ + { + "bbox": [ + 508, + 234, + 549, + 289 + ], + "lines": [ + { + "bbox": [ + 508, + 234, + 549, + 289 + ], + "spans": [ + { + "bbox": [ + 508, + 234, + 549, + 289 + ], + "type": "image", + "image_path": "77cf18f6ca5c4e6b0b0a0e25d4e49d3417732c35d65185fce0b503e3a22f5c32.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 318, + 294, + 381, + 392 + ], + "blocks": [ + { + "bbox": [ + 318, + 294, + 381, + 392 + ], + "lines": [ + { + "bbox": [ + 318, + 294, + 381, + 392 + ], + "spans": [ + { + "bbox": [ + 318, + 294, + 381, + 392 + ], + "type": "image", + "image_path": "353f2db32bb48dc55d7ae5715b753a481a90eeff571048278ccfe5c6dbf05fa4.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 409, + 555, + 464 + ], + "lines": [ + { + "bbox": [ + 313, + 409, + 555, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 409, + 555, + 464 + ], + "type": "text", + "content": "Figure 1. Reconstruction vs. generation dilemma: Naively scaling visual tokenizers achieves better reconstruction but degrades downstream autoregressive (AR) generation. In contrast, GigaTok achieves better performance for both reconstruction and generation as tokenizers scale up." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 384, + 294, + 422, + 335 + ], + "blocks": [ + { + "bbox": [ + 384, + 294, + 422, + 335 + ], + "lines": [ + { + "bbox": [ + 384, + 294, + 422, + 335 + ], + "spans": [ + { + "bbox": [ + 384, + 294, + 422, + 335 + ], + "type": "image", + "image_path": "bb78f583482a5c116fc43be783712b1b6b60655b1a1addd0864eeffddc52cd97.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 424, + 294, + 463, + 390 + ], + "blocks": [ + { + "bbox": [ + 424, + 294, + 463, + 390 + ], + "lines": [ + { + "bbox": [ + 424, + 294, + 463, + 390 + ], + "spans": [ + { + "bbox": [ + 424, + 294, + 463, + 390 + ], + "type": "image", + "image_path": "9a5879ed81b192743b97092ab73ca4b3a7b630621c05af234a340dc12fc53b01.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 390, + 531, + 397 + ], + "lines": [ + { + "bbox": [ + 440, + 390, + 531, + 397 + ], + "spans": [ + { + "bbox": [ + 440, + 390, + 531, + 397 + ], + "type": "text", + "content": "Better generation with larger tokenizer" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 465, + 294, + 506, + 390 + ], + "blocks": [ + { + "bbox": [ + 465, + 294, + 506, + 390 + ], + "lines": [ + { + "bbox": [ + 465, + 294, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 465, + 294, + 506, + 390 + ], + "type": "image", + "image_path": "67606d8a17a8e261fbda24cf1eb68ba0a5931c3eb125b78425daa2c2a6b086a8.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 508, + 294, + 549, + 390 + ], + "blocks": [ + { + "bbox": [ + 508, + 294, + 549, + 390 + ], + "lines": [ + { + "bbox": [ + 508, + 294, + 549, + 390 + ], + "spans": [ + { + "bbox": 
[ + 508, + 294, + 549, + 390 + ], + "type": "image", + "image_path": "ed48fc0f3f77e7ab087ee9a1c9566f902d55b4209f29ae02a5ae2ec1ca6117c6.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 481, + 554, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 481, + 554, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 481, + 554, + 553 + ], + "type": "text", + "content": "with image reconstruction supervision, while the AR generator models the distribution of these discrete tokens through next-token prediction. The image tokenizer plays a pivotal role in AR visual generation, providing a compact and expressive latent space that enables effective generative modeling by downstream AR models." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 555, + 555, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 555, + 699 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 555, + 699 + ], + "type": "text", + "content": "Despite its pivotal role, scaling of visual tokenizer is rarely explored in the literature. In fact, unlike the downstream AR models whose scalability has been widely validated [12, 30, 60, 62], scaling the visual tokenizer presents a significant challenge. Specifically, there exists a reconstruction vs. generation dilemma, where scaling tokenizer improves reconstruction fidelity but degrades downstream generation quality, as shown in Fig. 1. This dilemma is also observed in prior works [13, 21]. In this work, we seek to overcome this limitation and explore strategies for effectively scaling tokenizers to enhance both reconstruction and generation performance." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 326, + 701, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 701, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 326, + 701, + 553, + 714 + ], + "type": "text", + "content": "To investigate the root cause of this dilemma, we propose" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "type": "text", + "content": "arXiv:2504.08736v2 [cs.CV] 24 Aug 2025" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 702, + 137, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 702, + 137, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 702, + 137, + 712 + ], + "type": "text", + "content": "† Corresponding Author." + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 72, + 553, + 238 + ], + "blocks": [ + { + "bbox": [ + 58, + 72, + 553, + 238 + ], + "lines": [ + { + "bbox": [ + 58, + 72, + 553, + 238 + ], + "spans": [ + { + "bbox": [ + 58, + 72, + 553, + 238 + ], + "type": "image", + "image_path": "4a8ec5874098be40d99d09fbcc46ef153fce31d4907340b386d0ec15c364716f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 248, + 549, + 260 + ], + "lines": [ + { + "bbox": [ + 59, + 248, + 549, + 260 + ], + "spans": [ + { + "bbox": [ + 59, + 248, + 549, + 260 + ], + "type": "text", + "content": "Figure 2. 
The 2.9B GigaTok achieves SOTA autoregressive image generation with a 1.4B AR model on ImageNet " + }, + { + "bbox": [ + 59, + 248, + 549, + 260 + ], + "type": "inline_equation", + "content": "256\\times 256" + }, + { + "bbox": [ + 59, + 248, + 549, + 260 + ], + "type": "text", + "content": " resolution." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 281, + 295, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 281, + 295, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 281, + 295, + 376 + ], + "type": "text", + "content": "an AR probing scheme that trains a lightweight downstream generative AR model to monitor the tokenizer's training process. Surprisingly, we find that as tokenizers scale, the downstream AR model struggles more to learn the resulting token distribution, as evidenced by the increasing AR generation loss. This suggests that the larger tokenizers produce a more complex token space, making it increasingly difficult for AR models to learn effectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 378, + 295, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 378, + 295, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 378, + 295, + 617 + ], + "type": "text", + "content": "To address this challenge, we introduce pre-trained visual representation models (e.g. DINOv2 [43]) to regularize tokenizers. Specifically, we leverage a semantic regularization loss during tokenizer training, encouraging high similarity between tokenizer features and the pre-trained model features. Such regularization helps constrain the latent space complexity, preventing the tokenizer from learning overly complicated latent token dependencies that hinder downstream AR generative modeling. 
Moreover, we design a vector-quantized (VQ) tokenizer with a hybrid CNN-Transformer architecture as the backbone, suitable for both 1D and 2D tokenizers, and explore best practices for scaling tokenizers: (1) 1D tokenizers exhibit better scalability compared to 2D tokenizers; (2) Asymmetric model scaling, prioritizing decoder scaling over encoder scaling, proves effective; (3) Entropy loss [69] becomes crucial for convergence when training tokenizers with billion-level parameters. With our semantic regularization and three key scaling strategies, we effectively scale GigaTok to 3 billion parameters, overcoming the reconstruction vs. generation dilemma." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 617, + 246, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 617, + 246, + 628 + ], + "spans": [ + { + "bbox": [ + 67, + 617, + 246, + 628 + ], + "type": "text", + "content": "We summarize our contributions as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 630, + 296, + 714 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 55, + 630, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 689 + ], + "type": "text", + "content": "- We identify that the reconstruction vs. generation dilemma in tokenizer scaling stems from increased latent space complexity in larger tokenizers. To address this, we propose semantic regularization, effectively mitigating the dilemma and enabling tokenizer scaling." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "- We explore best practices for scaling tokenizers, including 1D tokenizers with hybrid CNN-Transformer archi" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 322, + 281, + 555, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 281, + 555, + 304 + ], + "spans": [ + { + "bbox": [ + 322, + 281, + 555, + 304 + ], + "type": "text", + "content": "tecture, asymmetric encoder-decoder scaling, and entropy loss for billion-scale tokenizers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 305, + 555, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 305, + 555, + 342 + ], + "spans": [ + { + "bbox": [ + 314, + 305, + 555, + 342 + ], + "type": "text", + "content": "- Our GigaTok is the first tokenizer scaled to 3B, achieving state-of-the-art reconstruction, downstream AR generation, and downstream AR representation on ImageNet." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 355, + 400, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 355, + 400, + 368 + ], + "spans": [ + { + "bbox": [ + 314, + 355, + 400, + 368 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 376, + 555, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 376, + 555, + 567 + ], + "spans": [ + { + "bbox": [ + 312, + 376, + 555, + 567 + ], + "type": "text", + "content": "Image tokenizers. Image tokenizers map image inputs into discrete [15, 56, 66] or continuous [28] tokens which can be modeled by downstream generative models. 
For discrete tokenizers, Vector Quantization (VQ) [15, 56, 66] is dominantly adopted. Recently, new quantization methods [49, 69, 75, 76] have also been proposed for better scaling of codebook size. However, how to properly scale up tokenizer models is insufficiently studied in existing literature. ViT-VQGAN [66] and TiTok [70] utilize transformer architecture to enable convenient scaling of tokenizers, but end up training their best generative models on smaller tokenizer versions. A concurrent work, ViTok [76], suggests de-prioritizing VAE scaling due to its less predictable effect for downstream diffusion models. We observe a similar reconstruction vs. generation dilemma in scaling discrete tokenizers, and provide our analysis and solution to it." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 569, + 556, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 556, + 676 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 556, + 676 + ], + "type": "text", + "content": "Autoregressive Visual Generation. Autoregressive visual generative models [33, 38, 40, 49, 50, 56, 58, 60, 66] follow the next-token-prediction (NTP) approach of LLMs, enabling the leverage of advancements in LLMs and simplifying the path to unified multi-modal generation. Other variants utilize visual-specific paradigms such as mask image modeling [8, 61, 69, 70] and next-scale-prediction [36, 53] for better performance. We reveal that scaling tokenizers helps NTP AR models to be comparable to these variants." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 556, + 715 + ], + "type": "text", + "content": "Semantic Guidance for Visual Generative Models and Tokenizers. 
The guidance from visual foundation models [7, 23, 43, 46, 72] has been used to improve training" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 204 + ], + "type": "text", + "content": "convergence speed and quality [65, 71] of visual generative models, as well as enhancing representation quality or downstream performance of visual tokenizers [9, 10, 18, 36, 41, 59, 63–65, 68, 73, 76, 77]. REPA [71] presents impressive performance improvements brought by a simple representation alignment strategy, and recently, VA-VAE [65] shows the significant benefits of semantic guidance to the reconstruction-generation Pareto Frontier of VAEs. Different from existing work, GigaTok novelly reveals the critical role of semantic regularization for resolving the reconstruction vs. generation dilemma in scaling visual tokenizers." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 214, + 128, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 214, + 128, + 227 + ], + "spans": [ + { + "bbox": [ + 55, + 214, + 128, + 227 + ], + "type": "text", + "content": "3. Pilot Study" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 234, + 296, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 234, + 296, + 294 + ], + "spans": [ + { + "bbox": [ + 55, + 234, + 296, + 294 + ], + "type": "text", + "content": "We first introduce AR Probing as a proxy to effectively monitor the tokenizer's effectiveness for downstream generation (Sec 3.1), followed by a pilot experiment that investigates the reconstruction vs. generation challenges when naively scaling visual tokenizers (Sec 3.2)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 301, + 253, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 301, + 253, + 313 + ], + "spans": [ + { + "bbox": [ + 55, + 301, + 253, + 313 + ], + "type": "text", + "content": "3.1. AR Probing for Tokenizer Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 319, + 296, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 319, + 296, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 319, + 296, + 462 + ], + "type": "text", + "content": "In autoregressive visual generation, the training of the tokenizer and downstream AR model are performed in separate stages. In the first stage, a visual tokenizer is trained to compress images into discrete tokens, optimized with reconstruction objective. In the second stage, the downstream generative model is trained based on the discrete tokens from the pre-trained tokenizer. However, a tokenizer that performs well in terms of reconstruction fidelity in the first stage may not necessarily lead to better performance for downstream generative models. Thus, it is crucial to evaluate the effectiveness of the trained tokenizers for downstream generation alongside its reconstruction quality." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 463, + 296, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 296, + 569 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 296, + 569 + ], + "type": "text", + "content": "Despite its importance, assessing how a tokenizer influences downstream generation models can be computationally expensive. For example, sufficiently training a 343M parameter downstream AR generator takes 170 hours on 64 V100 GPUs. To address this challenge, we introduce AR Probing, inspired by Linear Probing in representation learning literature [11, 23]. 
The key idea is to use the performance of a small AR model as a proxy to reflect the performance trends of large-scale AR models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 570, + 296, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 296, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 296, + 689 + ], + "type": "text", + "content": "Specifically, we use the tokenizer to train a small Llama-style model [50, 54] (111M parameters) for 50 epochs, and evaluate its gFID [24], validation loss, and linear probing accuracy [11, 23] for a fair comparison between different tokenizers. Training the proposed AR Probing model for evaluating tokenizers is " + }, + { + "bbox": [ + 55, + 570, + 296, + 689 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 55, + 570, + 296, + 689 + ], + "type": "text", + "content": " more efficient than training the original 343M downstream AR model. Our experiments in Sec. 5.1 (Fig. 6) demonstrate that the trends observed with AR Probing align with the performance of the large-scale AR models after sufficient training." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "gFID. 
The generation FID [24] of AR probing indicates the overall image generation performance of the two-stage" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 318, + 72, + 394, + 137 + ], + "blocks": [ + { + "bbox": [ + 318, + 72, + 394, + 137 + ], + "lines": [ + { + "bbox": [ + 318, + 72, + 394, + 137 + ], + "spans": [ + { + "bbox": [ + 318, + 72, + 394, + 137 + ], + "type": "image", + "image_path": "957829c2bd5cc39a6f9d659e080757c607882628c29f8d7313cc9801a5dce8e6.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 148, + 554, + 214 + ], + "lines": [ + { + "bbox": [ + 313, + 148, + 554, + 214 + ], + "spans": [ + { + "bbox": [ + 313, + 148, + 554, + 214 + ], + "type": "text", + "content": "Figure 3. Scaling trend for vanilla 1D tokenizers. As the model size increases, the reconstruction quality of vanilla tokenizers improves but the downstream AR Probing gFID consistently degrades. The increasing AR Probing validation loss indicates that scaling vanilla tokenizers results in a more complex latent space, making it difficult for AR models to learn effectively." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 394, + 72, + 474, + 137 + ], + "blocks": [ + { + "bbox": [ + 394, + 72, + 474, + 137 + ], + "lines": [ + { + "bbox": [ + 394, + 72, + 474, + 137 + ], + "spans": [ + { + "bbox": [ + 394, + 72, + 474, + 137 + ], + "type": "image", + "image_path": "9beeaae878ebd25ec7a8ed67c02f768536aee94a7f93708f963790666b9054d6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 477, + 72, + 552, + 137 + ], + "blocks": [ + { + "bbox": [ + 477, + 72, + 552, + 137 + ], + "lines": [ + { + "bbox": [ + 477, + 72, + 552, + 137 + ], + "spans": [ + { + "bbox": [ + 477, + 72, + 552, + 137 + ], + "type": "image", + "image_path": "ef14e0107eeace631870127030a6ebc25126aabfd00217ebbda2c0dfe9e87aee.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 225, + 553, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 225, + 553, + 273 + ], + "spans": [ + { + "bbox": [ + 313, + 225, + 553, + 273 + ], + "type": "text", + "content": "framework. It reflects both the reconstruction fidelity of the tokenizer and how well the downstream AR probing model can learn the dependency of the visual tokens (i.e., learnability of the token distribution)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 274, + 554, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 554, + 405 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 554, + 405 + ], + "type": "text", + "content": "Validation loss. We use the validation loss of the AR probing model to measure the learnability of the latent tokens as a disentangled factor. 
The validation loss is calculated as an average of the token-wise cross-entropy loss in the next-token-prediction paradigm on ImageNet [48] 50k validation set. With the same vocabulary size, the same number and structure of visual tokens, and the same AR probing model, larger validation loss indicates a latent space that is more difficult for the AR model to learn. Therefore, we use validation loss to reflect the latent space complexity and learnability for AR models." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 405, + 554, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 405, + 554, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 405, + 554, + 502 + ], + "type": "text", + "content": "Linear probing accuracy. Beyond visual generation quality, we also investigate whether scaling tokenizers will lead to better visual representations of AR models, which may provide inspiration for future research in unified multimodal understanding and generation with AR models. To assess the representation quality, we adopt the standard practice [11, 23] of linear probing accuracy using features from the middle Transformer layer of the AR probing model." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 509, + 541, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 509, + 541, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 509, + 541, + 521 + ], + "type": "text", + "content": "3.2. 
Naively Scaling Tokenizers Does Not Work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 527, + 554, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 527, + 554, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 527, + 554, + 694 + ], + "type": "text", + "content": "To study the challenges when naively scaling visual tokenizers, we train three vector-quantized tokenizers1 on ImageNet [48] at " + }, + { + "bbox": [ + 313, + 527, + 554, + 694 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 527, + 554, + 694 + ], + "type": "text", + "content": " resolution with increasing model sizes. As shown in Fig. 3, as the tokenizer size increases, although the reconstruction quality (rFID) consistently improves, the AR generation performance (gFID) significantly degrades. This highlights the reconstruction vs. generation dilemma in tokenizer scaling. Moreover, we observe that the validation loss of AR Probing consistently increases as the tokenizers scale, indicating that larger tokenizers lead to complicated token dependencies that are more difficult for the AR model to learn. This observation motivates us to design the semantic regularization to constrain the latent space complexity of the tokenizer and therefore break the" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 314, + 703, + 484, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 703, + 484, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 703, + 484, + 712 + ], + "type": "text", + "content": "The tokenizer architectures are described in Sec. 
4.1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 258, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 258, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 258, + 83 + ], + "type": "text", + "content": "reconstruction vs. generation dilemma in Sec. 4.2." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 95, + 116, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 95, + 116, + 109 + ], + "spans": [ + { + "bbox": [ + 55, + 95, + 116, + 109 + ], + "type": "text", + "content": "4. GigaTok" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 114, + 296, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 114, + 296, + 236 + ], + "spans": [ + { + "bbox": [ + 55, + 114, + 296, + 236 + ], + "type": "text", + "content": "In this section, we introduce the model structure and training strategies for our scalable visual tokenizer, GigaTok. In Sec. 4.1, we present a tokenizer backbone supporting 1D and 2D token structures, and discuss the asymmetric scaling strategies for the encoder and decoder. In Sec. 4.2, we introduce semantic regularization, which breaks the reconstruction vs. generation dilemma by regularizing the complexity of the latent space with pre-trained visual representations. In Sec. 4.3, we show how entropy loss [69] facilitates the convergence of billion-scale tokenizers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 242, + 138, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 242, + 138, + 254 + ], + "spans": [ + { + "bbox": [ + 55, + 242, + 138, + 254 + ], + "type": "text", + "content": "4.1. 
Architecture" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 259, + 296, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 259, + 296, + 451 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 296, + 451 + ], + "type": "text", + "content": "The CNN [32] architectures have been the dominant choices for image tokenizers [15, 40, 69, 76] due to their effectiveness in capturing fine-grained local details. Yet, Transformers are more scalable architectures with less inductive bias. Thus, we design a vector quantized tokenizer backbone with a hybrid architecture that combines CNN [15, 32] and Transformer [6, 13, 57] for encoder and decoder (Fig. 4). Specifically, our encoder consists of a series of CNN blocks that progressively downsamples the input image by a factor of " + }, + { + "bbox": [ + 55, + 259, + 296, + 451 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 55, + 259, + 296, + 451 + ], + "type": "text", + "content": ", followed by Transformer layers and a vector quantizer to produce discrete latent codes. Similarly, our decoder consists of multiple Transformer layers, followed by CNN decoders which upsamples the features to obtain the reconstructed image2. Our tokenizer architecture can be adapted to both 1D and 2D tokenizers by using different Transformer designs introduced in the next two paragraphs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 453, + 296, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 296, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 296, + 502 + ], + "type": "text", + "content": "2D tokenizers with ViT. For 2D tokenizers, the Transformers in both tokenizer encoder and decoder are implemented by ViT [13] architecture. 2D structures of the latent features and tokens are preserved throughout the tokenizer." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 504, + 296, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 504, + 296, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 504, + 296, + 624 + ], + "type": "text", + "content": "1D tokenizers with Q-Former. For 1D tokenizers, we implement the Transformer modules in both encoder and decoder as Q-Formers [6, 34]. The Q-Former in the encoder employs 1D queries, transforming 2D input features into 1D latent tokens. The Q-Former in the decoder utilizes 2D queries to transform 1D latent tokens back to 2D features, which are then passed to the CNN decoder to reconstruct images. The 1D tokenizers remove the 2D inductive bias and demonstrate better scalability than 2D tokenizers in our experiments (Sec. 5.5)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 626, + 296, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 626, + 296, + 687 + ], + "spans": [ + { + "bbox": [ + 55, + 626, + 296, + 687 + ], + "type": "text", + "content": "Asymmetric encoder-decoder scaling. Since the decoder faces the more challenging task of reconstructing images from lossy latent codes, we adopt an asymmetric design for more efficient parameter allocation. 
Specifically, we scale both the encoder and decoder, while ensuring that" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 317, + 70, + 555, + 219 + ], + "blocks": [ + { + "bbox": [ + 317, + 70, + 555, + 219 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 555, + 219 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 555, + 219 + ], + "type": "image", + "image_path": "0c173660c2bed58e0acc195ee84ab6bec0a1321cd7227814a055257e9e31fd4a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 228, + 555, + 238 + ], + "lines": [ + { + "bbox": [ + 313, + 228, + 555, + 238 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 555, + 238 + ], + "type": "text", + "content": "Figure 4. GigaTok architecture and semantic regularization." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 239, + 555, + 283 + ], + "lines": [ + { + "bbox": [ + 313, + 239, + 555, + 283 + ], + "spans": [ + { + "bbox": [ + 313, + 239, + 555, + 283 + ], + "type": "text", + "content": "Top: We use a hybrid CNN-Transformer design for our visual tokenizer. The transformer layers are implemented with ViT for 2D tokenizer and Q-Former for 1D tokenizer. Bottom: We use a frozen DINOv2 [43] image encoder for semantic regularization." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 297, + 554, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 297, + 554, + 346 + ], + "spans": [ + { + "bbox": [ + 313, + 297, + 554, + 346 + ], + "type": "text", + "content": "the decoders are always larger than the encoders. In practice, we maintain the same and fixed size for the CNN encoder/decoder and only increase the depth and width of the Transformer modules for scaling." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 352, + 454, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 352, + 454, + 365 + ], + "spans": [ + { + "bbox": [ + 313, + 352, + 454, + 365 + ], + "type": "text", + "content": "4.2. Semantic Regularization" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 369, + 554, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 369, + 554, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 369, + 554, + 464 + ], + "type": "text", + "content": "In our pilot study (Sec. 3.2), the latent space complexity significantly increases as the tokenizer scales, which potentially leads to worse downstream AR generation for larger tokenizers. We hypothesize that larger tokenizers tend to capture excessive fine-grained low-level details for better reconstruction, resulting in overly complex latent token distributions, which makes it harder for AR models to learn the token dependencies effectively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 465, + 554, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 465, + 554, + 560 + ], + "spans": [ + { + "bbox": [ + 313, + 465, + 554, + 560 + ], + "type": "text", + "content": "To address this, we introduce semantic regularization to guide the tokenizer to encode a more semantically consistent latent space, which is less complex and easier for downstream generative modeling. Specifically, we introduce a simple semantic regularization term alongside the tokenizer training objective. The regularization aligns the intermediate features of the tokenizer decoder with the feature representations extracted from pre-trained frozen DINOv2 [43]." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "text", + "content": "Mathematically, let " + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "inline_equation", + "content": "f^{\\mathrm{dec},l}" + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "text", + "content": " be the output feature of the " + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "text", + "content": "-th layer of the Transformer decoder, " + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "inline_equation", + "content": "f^{\\mathrm{DINO}}" + }, + { + "bbox": [ + 313, + 560, + 554, + 609 + ], + "type": "text", + "content": " be the semantic features of a pretrained image encoder (here DINOv2-B [43]). 
The semantic regularization can be represented as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 354, + 617, + 555, + 650 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 617, + 555, + 650 + ], + "spans": [ + { + "bbox": [ + 354, + 617, + 555, + 650 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e g}} = \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\sin \\left(f _ {n} ^ {\\mathrm {d e c}, l}, \\phi \\left(f _ {n} ^ {\\mathrm {D I N O}}\\right)\\right) \\tag {1}", + "image_path": "3d5da0aa1a4a8a7f9e6fd7c5016ffb83c94f6b86859864d66000d82cdf735b24.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " is the batch size, " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " is the image index, " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " is a cosine similarity function, and " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\phi (\\cdot)" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " is an MLP that projects decoder feature " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "f^{\\mathrm{dec},l}" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + 
"content": " to match the channel dimension of " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "f^{\\mathrm{DINO}}" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": ". When training VQ tokenizers, we add the semantic regularization to the original VQGAN [15, 50] objectives:" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "text", + "content": "Throughout this work, we use downsample ratio " + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "inline_equation", + "content": "p = 16" + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "text", + "content": ", codebook dimension " + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "inline_equation", + "content": "D = 8" + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "text", + "content": ", and codebook size 16384 by default." + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 294, + 142 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 294, + 142 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 294, + 142 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 294, + 142 + ], + "type": "image", + "image_path": "2a947eeb75e4a83e727c41d1f9c1ee05719590673ed1bddb84605054f0989839.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 150, + 295, + 195 + ], + "lines": [ + { + "bbox": [ + 55, + 150, + 295, + 195 + ], + "spans": [ + { + "bbox": [ + 55, + 150, + 295, + 195 + ], + "type": "text", + "content": "Figure 5. Training curves for 2.9B XL-XXL tokenizers with and without entropy loss. 
A 2.9B tokenizer does not converge without entropy loss. The entropy loss encourages high codebook usage and stabilizes training loss." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 126, + 224, + 295, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 224, + 295, + 237 + ], + "spans": [ + { + "bbox": [ + 126, + 224, + 295, + 237 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\mathrm {v q g a n}} + \\lambda \\mathcal {L} _ {\\text {r e g}}, \\tag {2}", + "image_path": "700aba12e34d4e0774e33b78bc1075413f3a17d8a9652afd2b5f318082fa9283.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "spans": [ + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": "and we empirically set " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": " in this work. 
Here " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{vqgan}}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": " is a combination of multiple losses, including " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{recon}}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": ", the " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": " reconstruction loss on image pixels, " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{percp}}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": ", the perceptual loss [27, 74], " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{GAN}}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": ", PatchGAN [26] adversarial loss, and " + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{VQ}}" + }, + { + "bbox": [ + 55, + 243, + 296, + 304 + ], + "type": "text", + "content": " [15, 66] the VQ codebook loss." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 310, + 274, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 310, + 274, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 310, + 274, + 323 + ], + "type": "text", + "content": "4.3. 
Entropy Loss for Billion-Level Tokenizers" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 328, + 296, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 328, + 296, + 411 + ], + "spans": [ + { + "bbox": [ + 55, + 328, + 296, + 411 + ], + "type": "text", + "content": "When training a 2.9B tokenizer, we find that using the same training recipe as the 622M tokenizer leads to convergence failure for both perceptual loss and reconstruction loss, and consistently low codebook usage. We hypothesize that low codebook usage accounts for the convergence difficulty. To address this, we incorporate entropy penalty [67, 69] to encourage higher codebook utilization:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 422, + 295, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 422, + 295, + 435 + ], + "spans": [ + { + "bbox": [ + 111, + 422, + 295, + 435 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {e n t r o p y}} = \\mathbb {E} _ {\\mathbf {z}} [ H (\\hat {\\mathbf {z}} | \\mathbf {z}) ] - H (\\hat {\\mathbf {z}}) \\tag {3}", + "image_path": "c26fc826026f4d0cbd93db0771b9f0580709032d81e1bda4477e2c5a153495cd.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "spans": [ + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "H(\\cdot)" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " denotes the Shannon entropy, " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " is the input for quantizer to be quantized to " 
+ }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": "-th codebook vector. " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}}|\\mathbf{z})]" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " penalizes the uncertainty in quantization to reduce quantization error, and " + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "inline_equation", + "content": "-H(\\hat{\\mathbf{z}})" + }, + { + "bbox": [ + 55, + 444, + 296, + 577 + ], + "type": "text", + "content": " encourages the codebook vectors to be selected more uniformly across the entire codebook. The detailed derivation can be found in our supp. We find that the entropy penalty addresses the convergence difficulty of large tokenizers. As shown in Fig. 5, introducing entropy loss to the 2.9B tokenizer enables the codebook usage to quickly reach a high level, and the loss converges properly3." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 587, + 137, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 587, + 137, + 601 + ], + "spans": [ + { + "bbox": [ + 55, + 587, + 137, + 601 + ], + "type": "text", + "content": "5. 
Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 607, + 116, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 607, + 116, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 607, + 116, + 620 + ], + "type": "text", + "content": "5.1. Settings" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 625, + 296, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 625, + 296, + 686 + ], + "spans": [ + { + "bbox": [ + 55, + 625, + 296, + 686 + ], + "type": "text", + "content": "For scaling up visual tokenizers, we follow the architecture configurations for the Transformers in GigaTok tokenizers as summarized in Tab. 1. We evaluate the tokenizers from three perspectives: reconstruction, downstream AR generation, and downstream AR representation quality. We use" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 324, + 70, + 547, + 187 + ], + "blocks": [ + { + "bbox": [ + 324, + 70, + 547, + 187 + ], + "lines": [ + { + "bbox": [ + 324, + 70, + 547, + 187 + ], + "spans": [ + { + "bbox": [ + 324, + 70, + 547, + 187 + ], + "type": "table", + "html": "
TypeEnc./Dec.Params.BlocksHeadsDim.
1D Tok.S26M68512
B115M1212768
L405M24161024
XL948M36201280
XXL1870M48241536
2D Tok.S19M68512
B86M1212768
L329M24161024
", + "image_path": "79b53a77bb3a6bf0032d29e80aaf661efdeae855aaf6a8344064a59663aa038f.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "lines": [ + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 195, + 553, + 228 + ], + "type": "text", + "content": "Table 1. Architectures of the transformer variants for tokenizer encoder/decoder parts in our experiments. We use Q-Former [6, 34] for 1D tokenizers and ViT [13] for 2D tokenizers." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 315, + 241, + 432, + 292 + ], + "blocks": [ + { + "bbox": [ + 315, + 241, + 432, + 292 + ], + "lines": [ + { + "bbox": [ + 315, + 241, + 432, + 292 + ], + "spans": [ + { + "bbox": [ + 315, + 241, + 432, + 292 + ], + "type": "image", + "image_path": "f8643373abb334d25b9757a02516d0285a771299f840593828f0c30f059ad67d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 301, + 554, + 378 + ], + "lines": [ + { + "bbox": [ + 313, + 301, + 554, + 378 + ], + "spans": [ + { + "bbox": [ + 313, + 301, + 554, + 378 + ], + "type": "text", + "content": "Figure 6. Correlation between AR Probing Performance and Larger AR models. For 3 tokenizers: S-S, S-L, and B-L, we present that as the tokenizer improves, the performance improvements of AR Probing correlate to the performance improvements of larger AR models. Therefore, the AR Probing can effectively indicate how the tokenizer affects downstream larger AR models with limited computational costs." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 432, + 242, + 552, + 293 + ], + "blocks": [ + { + "bbox": [ + 432, + 242, + 552, + 293 + ], + "lines": [ + { + "bbox": [ + 432, + 242, + 552, + 293 + ], + "spans": [ + { + "bbox": [ + 432, + 242, + 552, + 293 + ], + "type": "image", + "image_path": "18a6c7c53b14968caeb727bb6a912f7d19b3dcee70012feb02222289b7bdc5de.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 396, + 554, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 554, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 554, + 540 + ], + "type": "text", + "content": "rFID and LPIPS [74] to evaluate reconstruction fidelity, gFID to evaluate generation performance, and linear probing to evaluate the representation quality of the downstream AR model. Our downstream AR models are LlamaGen [50] with 1D absolute positional embedding. Our scaling experiments (Sec. 5.2) and ablation study (Sec. 5.3) use AR Probing (111M AR model described in Sec.3.1) validation loss, gFID, and linear probing to reflect the learnability of tokens, generation performance, and representation quality, respectively. While in the system-level comparison (Sec. 5.4), we train larger 1.4B AR models for comparison with previous work. More details are in the supplementary material." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 554, + 601 + ], + "type": "text", + "content": "Effectiveness of AR Probing. As shown in Fig. 6, AR Probing performances including gFID and linear probing accuracy align with the larger LlamaGen-XL [50] model results. 
Therefore, we use AR Probing throughout the following experiments except for the system-level comparison." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 610, + 512, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 610, + 512, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 610, + 512, + 624 + ], + "type": "text", + "content": "5.2. Scaling with Semantic Regularization" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 629, + 553, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 553, + 664 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 553, + 664 + ], + "type": "text", + "content": "We demonstrate that our proposed semantic regularization resolves the reconstruction vs. generation dilemma in scaling tokenizers." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 554, + 714 + ], + "type": "text", + "content": "Model scaling with semantic regularization. Results are shown in Fig. 7. (1) Semantic regularization improves the reconstruction fidelity, indicated by lower rFID. 
(2) More importantly, the AR Probing validation loss and gFID de" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 693, + 295, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 693, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 693, + 295, + 714 + ], + "type": "text", + "content": "We take perceptual loss as an example, and reconstruction loss shows a similar pattern" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 88, + 142, + 182 + ], + "blocks": [ + { + "bbox": [ + 59, + 88, + 142, + 182 + ], + "lines": [ + { + "bbox": [ + 59, + 88, + 142, + 182 + ], + "spans": [ + { + "bbox": [ + 59, + 88, + 142, + 182 + ], + "type": "image", + "image_path": "9acdec04d8802aac59b5a78b7e590b8e1c81de2dec608a2de7b670863a259d32.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 193, + 555, + 248 + ], + "lines": [ + { + "bbox": [ + 55, + 193, + 555, + 248 + ], + "spans": [ + { + "bbox": [ + 55, + 193, + 555, + 248 + ], + "type": "text", + "content": "Figure 7. Scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. By semantic regularization, GigaTok resolves the reconstruction vs. generation dilemma for tokenizer scaling in contrast to the vanilla version without semantic regularization. Moreover, GigaTok consistently improves the representation quality of downstream AR models by scaling up visual tokenizers. Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 167, + 88, + 240, + 182 + ], + "blocks": [ + { + "bbox": [ + 180, + 75, + 427, + 86 + ], + "lines": [ + { + "bbox": [ + 180, + 75, + 427, + 86 + ], + "spans": [ + { + "bbox": [ + 180, + 75, + 427, + 86 + ], + "type": "text", + "content": "w/o semantic regularization w/ semantic regularization" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 167, + 88, + 240, + 182 + ], + "lines": [ + { + "bbox": [ + 167, + 88, + 240, + 182 + ], + "spans": [ + { + "bbox": [ + 167, + 88, + 240, + 182 + ], + "type": "image", + "image_path": "2257c1fefec13ade9fff8b68b9354ff3fdf8dc5549b55947d1ab3e70a2d4d28e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 245, + 88, + 358, + 182 + ], + "blocks": [ + { + "bbox": [ + 245, + 88, + 358, + 182 + ], + "lines": [ + { + "bbox": [ + 245, + 88, + 358, + 182 + ], + "spans": [ + { + "bbox": [ + 245, + 88, + 358, + 182 + ], + "type": "image", + "image_path": "aa782d3f324e1ed9e8dfe46438225cb4c23c64580e41df85bdd83c38411467d8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 360, + 88, + 446, + 182 + ], + "blocks": [ + { + "bbox": [ + 360, + 88, + 446, + 182 + ], + "lines": [ + { + "bbox": [ + 360, + 88, + 446, + 182 + ], + "spans": [ + { + "bbox": [ + 360, + 88, + 446, + 182 + ], + "type": "image", + "image_path": "4c7ed3e7acb8d6a5d8a3737863302c3fcc97ce6fe3d7eebe9cf13c07e2792d61.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 446, + 88, + 552, + 182 + ], + "blocks": [ + { + "bbox": [ + 446, + 88, + 552, + 182 + ], + "lines": [ + { + "bbox": [ + 446, + 88, + 552, + 182 + ], + "spans": [ + { + "bbox": [ + 446, + 88, 
+ 552, + 182 + ], + "type": "image", + "image_path": "368c793c150808e4ed2a72a13b21b120c092ed9773c0128a63932362974ebca5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 58, + 262, + 294, + 310 + ], + "blocks": [ + { + "bbox": [ + 58, + 262, + 294, + 310 + ], + "lines": [ + { + "bbox": [ + 58, + 262, + 294, + 310 + ], + "spans": [ + { + "bbox": [ + 58, + 262, + 294, + 310 + ], + "type": "image", + "image_path": "a74a26f6ca59a171d98ac0daf44e41e2db54df84c3c51938647e5d2387d887a0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 319, + 296, + 407 + ], + "lines": [ + { + "bbox": [ + 55, + 319, + 296, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 319, + 296, + 407 + ], + "type": "text", + "content": "Figure 8. Visualization of tokenizer features with and without semantic regularization. We compute PCA among the tokenizer features of a group of images of the same \"golden retriever\" class and visualize the first 3 PCA components. We observe that the latent space of vanilla tokenizers shows inconsistent features both within a single image or across multiple semantically similar images. In contrast, GigaTok encodes images with semantic consistency and thus reduces the latent space complexity for AR models." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 423, + 295, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 423, + 295, + 506 + ], + "spans": [ + { + "bbox": [ + 55, + 423, + 295, + 506 + ], + "type": "text", + "content": "grades for larger tokenizers without semantic regularization, showing the reconstruction vs. generation dilemma. The dilemma is addressed with semantic regularization, evidenced by the relatively constrained validation loss and consistently decreasing gFID. 
(3) The Linear Probing results show that semantic regularization helps AR models to learn better representations as the tokenizer model scales up." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 507, + 295, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 507, + 295, + 614 + ], + "spans": [ + { + "bbox": [ + 55, + 507, + 295, + 614 + ], + "type": "text", + "content": "Visualization for the tokenizer feature space. We visualize the first 3 PCA components of the tokenizer features from the first Transformer decoder layer for a group of images. As shown in Fig. 8, we find the vanilla tokenizer encodes a latent space with limited semantic consistency, which potentially impairs its learnability for downstream AR models. In contrast, GigaTok presents semantically consistent patterns (Fig. 8), indicating a meaningful and consistent latent space." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 621, + 280, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 621, + 280, + 633 + ], + "spans": [ + { + "bbox": [ + 55, + 621, + 280, + 633 + ], + "type": "text", + "content": "5.3. Asymmetric 1D Tokenizer is More Scalable" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 639, + 295, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 639, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 55, + 639, + 295, + 687 + ], + "type": "text", + "content": "Tokenizer decoder deserves more parameters. To determine whether the decoder or encoder should be prioritized when scaling up, we compare S-B" + }, + { + "bbox": [ + 55, + 639, + 295, + 687 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 55, + 639, + 295, + 687 + ], + "type": "text", + "content": " and B-S tokenizers in Tab. 
2, both trained under the same setting for 100" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 315, + 262, + 559, + 340 + ], + "blocks": [ + { + "bbox": [ + 315, + 262, + 559, + 340 + ], + "lines": [ + { + "bbox": [ + 315, + 262, + 559, + 340 + ], + "spans": [ + { + "bbox": [ + 315, + 262, + 559, + 340 + ], + "type": "table", + "html": "
Enc./Dec. SizerFID↓LPIPS↓gFID↓Lin Acc.↑
B-S0.980.2216.5664.5
S-B0.940.2145.6559.8
S-L0.830.2065.1960.6
B-L0.810.2064.8266.9
", + "image_path": "7679cbf817b12edaa14706b6831569f0b7f4575518cabf088803fec2247636ff.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 349, + 555, + 393 + ], + "lines": [ + { + "bbox": [ + 313, + 349, + 555, + 393 + ], + "spans": [ + { + "bbox": [ + 313, + 349, + 555, + 393 + ], + "type": "text", + "content": "Table 2. The results for scaling encoder/decoder. Prioritizing the scaling of decoders benefits downstream generation more than scaling encoders (S-B v.s. B-S). But scaling encoders can still bring significant improvements (S-L v.s. B-L)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 317, + 402, + 550, + 488 + ], + "blocks": [ + { + "bbox": [ + 317, + 402, + 550, + 488 + ], + "lines": [ + { + "bbox": [ + 317, + 402, + 550, + 488 + ], + "spans": [ + { + "bbox": [ + 317, + 402, + 550, + 488 + ], + "type": "image", + "image_path": "bd97cce6fd6b87446461e6eb3df1ebedede28a455da8eb46153e9b4f34ceeb2b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 497, + 555, + 553 + ], + "lines": [ + { + "bbox": [ + 313, + 497, + 555, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 497, + 555, + 553 + ], + "type": "text", + "content": "Figure 9. Scalability comparison for 1D and 2D tokenizers. Using the same training setting, 1D tokenizers shows better reconstruction (rFID) and downstream representation quality (AR Probing: Lin Acc.). For downstream generation (gFID), 1D tokenizers present a steeper improving trend than 2D tokenizers." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 569, + 553, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 553, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 553, + 617 + ], + "type": "text", + "content": "epochs. 
Our results show that scaling decoders, rather than encoders, leads to greater improvements in both reconstruction and downstream generation, suggesting that decoder scaling should be prioritized." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 618, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 618, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 618, + 555, + 713 + ], + "type": "text", + "content": "Scaling tokenizer encoder is also important. While prioritizing the scaling of tokenizer decoders yields significant benefits, we also find that scaling tokenizer encoders can further enhance downstream models. In Tab. 2, we show that a B-L tokenizer gains significant improvements compared to an S-L tokenizer. Therefore, we recommend scaling both encoders and decoders while maintaining a larger decoder than the encoder for optimal performance." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 693, + 295, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 693, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 693, + 295, + 712 + ], + "type": "text", + "content": "X-Y tokenizer denotes X-sized encoder and Y-sized decoder. For example, S-B indicates Small encoder-Base decoder structure" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 63, + 70, + 548, + 466 + ], + "blocks": [ + { + "bbox": [ + 63, + 70, + 548, + 466 + ], + "lines": [ + { + "bbox": [ + 63, + 70, + 548, + 466 + ], + "spans": [ + { + "bbox": [ + 63, + 70, + 548, + 466 + ], + "type": "table", + "html": "
TokenizerTok. Type/Param.#TokensrFID↓Generator Model/Param.TypegFID↓Acc.↑
Continuous token modeling
VAE [47]KL†55M40960.27LDM-4 [47]400MDiff.3.60-
DiT-XL/2 [44]675MDiff.2.27-
SD-VAE [1]KL†84M10240.62SiT-XL/2 [42]675MDiff.2.06-
SiT-XL/2 + REPA [71]675MDiff.1.4274.6
VA-VAE [65]KL70M2560.28LightningDiT [65]675MDiff.1.35-
VAE [35]KL66M2560.53MAR-H [35]943MAR+Diff.1.5560.0°
Discrete token modeling
VQGAN [8]VQ66M2562.28MaskGIT [8]227MMask.6.18*-
TiTok-S [70]VQ72M1281.71MaskGIT-UViT-L [4, 8]287MMask.1.97-
TiTok-L [70]VQ641M322.21MaskGIT-ViT [8]177MMask.2.77-
B-AE-d32 [22]LFQ66M2561.69BiGR-XXL-d32 [22]1.5BAR+Diff2.36-
BiGR-XL-d32 [22]799MAR+Diff-69.8
VAR-Tok. [53]MSRQ†109M6801.00‡VAR-d24 [53]1.0BVAR2.09-
VAR-d30 [53]2.0BVAR1.92-
ImageFolder [36]MSRQ176M2860.80‡ImageFolder-VAR [36]362MVAR2.60-
VQGAN [15]VQ23M2564.98Taming-Tran. [15]1.4BAR15.78*-
ViT-VQGAN [66]VQ64M10241.28VIM-Large [66]1.7BAR4.17*-
RQ-VAE [33]RQ66M2563.20RQTran. [33]3.8BAR7.55*-
Open-MAGVIT2 [40]LFQ133M2561.17Open-MAGVIT2-XL [40]1.5BAR2.53-
IBQ [49]IBQ128M2561.37IBQ-XXL [49]2.1BAR2.05-
LlamaGen-Tok. [50]VQ72M2562.19LlamaGen-L [50]343MAR3.8140.5°
LlamaGen-XXL [50]1.4BAR3.09-
LlamaGen-Tok. [50]VQ72M5760.94LlamaGen-XXL [50]1.4BAR2.34-
GigaTok-B-LVQ622M2560.51‡LlamaGen-B (1d) [50]111MAR3.3367.7
GigaTok-S-SVQ136M2561.01LlamaGen-B (1d) [50]111MAR4.0562.6
GigaTok-S-BVQ232M2560.89LlamaGen-B (1d) [50]111MAR3.8362.9
GigaTok-B-LVQ622M2560.81LlamaGen-B (1d) [50]111MAR3.2667.6
LlamaGen-XXL (1d) [50]1.4BAR2.03*69.4
GigaTok-XL-XXLVQ2.9B2560.79LlamaGen-B (1d) [50]111MAR3.1572.0
LlamaGen-XXL (1d) [50]1.4BAR1.98*74.0
", + "image_path": "407127a1c9516fe4df42fdca4780af32a20fdb67d771d49da601eecdcc3f5880.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 473, + 555, + 508 + ], + "lines": [ + { + "bbox": [ + 55, + 473, + 555, + 508 + ], + "spans": [ + { + "bbox": [ + 55, + 473, + 555, + 508 + ], + "type": "text", + "content": "Table 3. System-level comparison for tokenizers and downstream generation models on ImageNet " + }, + { + "bbox": [ + 55, + 473, + 555, + 508 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 473, + 555, + 508 + ], + "type": "text", + "content": ". For gFID, we present the lowest value between w/ or w/o CFG scenarios. †: Training set includes data besides ImageNet. ‡: Using frozen DINO [7] for discriminator, which largely improves rFID. ☆: Without classifier-free-guidance. ◇: Data from BiGR [22]." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 522, + 295, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 295, + 630 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 295, + 630 + ], + "type": "text", + "content": "1D tokenizers are more scalable than 2D tokenizers. We train S-S, S-B and B-L 1D/2D tokenizers with the same setting with semantic regularization. As shown in Fig. 9, 1D tokenizers consistently achieve better rFID and AR Probing linear probing accuracy than 2D tokenizers. For AR Probing gFID, the 1D tokenizers exhibit a steeper scaling trend, eventually surpassing 2D tokenizers as the model scales. We attribute the superior scalability of 1D tokenizers to the reduced inductive bias." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 635, + 198, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 198, + 649 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 198, + 649 + ], + "type": "text", + "content": "5.4. 
System-level Comparison" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": "Experiment Settings. Using GigaTok for tokenization, we scale the training of LlamaGen [50] AR models on " + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 653, + 296, + 715 + ], + "type": "text", + "content": " ImageNet training set for 300 epochs to compare with other methods. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We provide" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 522, + 555, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 522, + 555, + 584 + ], + "spans": [ + { + "bbox": [ + 313, + 522, + 555, + 584 + ], + "type": "text", + "content": "the results of a B-L tokenizer trained with DINO discriminator [36, 53] to fairly compare rFID. But in practice we find DINO discriminator provides limited improvement for LPIPS and may affect the training stability of billion-scale tokenizers. Therefore, we exclude it from our main design." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "content": "Results. As shown in Tab. 3, our 2.9B GigaTok achieves state-of-the-art reconstruction performance (rIFD) among all discrete tokenizers. Furthermore, with our 2.9B tokenizer, the downstream 1.4B AR model achieves state-of-the-art image generation performance (gFID) among LLM-style autoregressive next-token-prediction models. 
VAR [53] predicts images with next-scale prediction rather than next-token-prediction, which is less compatible with language models. Our model achieves comparable gFID to VAR [53] with a simple LLM-style downstream AR genera" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 65, + 70, + 288, + 131 + ], + "blocks": [ + { + "bbox": [ + 65, + 70, + 288, + 131 + ], + "lines": [ + { + "bbox": [ + 65, + 70, + 288, + 131 + ], + "spans": [ + { + "bbox": [ + 65, + 70, + 288, + 131 + ], + "type": "table", + "html": "
Decoder\\AR Model SizeBLXXL
B3.7%2.3%1.3%
L11.2%7.0%3.4%
XXL32.4%20.3%9.9%
", + "image_path": "d13b14d845494dadf9a20a896b770459572d94b3dd032d5d792021b54867189b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 202, + 296, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 202, + 296, + 299 + ], + "spans": [ + { + "bbox": [ + 55, + 202, + 296, + 299 + ], + "type": "text", + "content": "tor without incorporating vision-specific designs like VAR. Moreover, this 1.4B AR model trained on the 2.9B tokenizer achieves state-of-the-art linear probing accuracy via visual generative pretraining5. This indicates that our GigaTok helps the downstream generation model to learn better representations. The high-quality representation learned from generative pre-training may also help unify generation and understanding for future native multimodal models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 304, + 222, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 304, + 222, + 317 + ], + "spans": [ + { + "bbox": [ + 55, + 304, + 222, + 317 + ], + "type": "text", + "content": "5.5. Discussion and Ablation Study" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 56, + 331, + 298, + 392 + ], + "blocks": [ + { + "bbox": [ + 55, + 140, + 295, + 183 + ], + "lines": [ + { + "bbox": [ + 55, + 140, + 295, + 183 + ], + "spans": [ + { + "bbox": [ + 55, + 140, + 295, + 183 + ], + "type": "text", + "content": "Table 4. Ratio of time consumptions for tokenizer decoding during image generation. When we use a 2.9B XLXXL tokenizer for a 1.4B LlamaGen-XXL AR model, the tokenizer decoding only takes " + }, + { + "bbox": [ + 55, + 140, + 295, + 183 + ], + "type": "inline_equation", + "content": "9.9\\%" + }, + { + "bbox": [ + 55, + 140, + 295, + 183 + ], + "type": "text", + "content": " of the total inference time." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 331, + 298, + 392 + ], + "lines": [ + { + "bbox": [ + 56, + 331, + 298, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 298, + 392 + ], + "type": "table", + "html": "
Align. Layer lrFID↓LPIPS↓gFID↓Lin Acc.↑
21.060.2246.2663.4
31.010.2236.1061.9
41.070.2236.0758.6
", + "image_path": "86e9797e3f6e778b4e27c04bb9c93e8c669942a1dfc25453926910afa82086d5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 56, + 465, + 304, + 526 + ], + "blocks": [ + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "lines": [ + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "text", + "content": "Table 5. Layer " + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "text", + "content": " for semantic regularization (S-S tokenizer). Smaller " + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "text", + "content": " brings better downstream AR model representations but can sacrifice reconstruction and downstream generation quality. We choose " + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "inline_equation", + "content": "l = 3" + }, + { + "bbox": [ + 55, + 400, + 296, + 445 + ], + "type": "text", + "content": " by default for more balanced performance." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 465, + 304, + 526 + ], + "lines": [ + { + "bbox": [ + 56, + 465, + 304, + 526 + ], + "spans": [ + { + "bbox": [ + 56, + 465, + 304, + 526 + ], + "type": "table", + "html": "
Sem. Enc.rFID↓LPIPS↓gFID↓Lin Acc.↑
CLIP [16, 46]0.910.2106.3561.4
SigLIP [72]0.920.2106.2056.7
DINOv2-B [43]0.850.2125.5564.4
", + "image_path": "6d6262b791a8ccca0ad91111cb5a0b1313980e427f8b7c97c20147297de64888.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "text", + "content": "Discussion on generation costs. When generating an image, AR models take multiple passes to predict tokens, while tokenizers only need one forward pass. Therefore, the time consumption for decoding tokens to images is relatively small compared to AR models. We record the ratio of time spent on tokenizer decoding for different tokenizer/AR models in Tab. 4. For a 1.4B AR model, our largest 2.9B tokenizer takes only " + }, + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "inline_equation", + "content": "\\sim 10\\%" + }, + { + "bbox": [ + 55, + 582, + 295, + 677 + ], + "type": "text", + "content": " of the total inference time." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 318, + 70, + 553, + 144 + ], + "blocks": [ + { + "bbox": [ + 55, + 534, + 295, + 568 + ], + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 568 + ], + "type": "text", + "content": "Table 6. Ablation study for the choice of pretrained semantic encoders (S-B tokenizer). DINOv2-B delivers the best performance among all models." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 70, + 553, + 144 + ], + "lines": [ + { + "bbox": [ + 318, + 70, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 318, + 70, + 553, + 144 + ], + "type": "table", + "html": "
Sem. Reg. λrFID↓LPIPS↓gFID↓Lin Acc.↑
0.251.280.2266.2757.0
0.501.220.2286.3958.6
0.751.270.2366.2958.6
1.001.380.2396.2762.5
", + "image_path": "f4034ae46c2d3aa8bd499318635ef2d35fe3c4609eb808a740ddf39df9fd08b0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 152, + 555, + 196 + ], + "lines": [ + { + "bbox": [ + 313, + 152, + 555, + 196 + ], + "spans": [ + { + "bbox": [ + 313, + 152, + 555, + 196 + ], + "type": "text", + "content": "Table 7. Ablation Study for the semantic regularization weight (S-S tokenizer). A strong semantic regularization weight leads to worse reconstruction but better downstream representation. We choose " + }, + { + "bbox": [ + 313, + 152, + 555, + 196 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 313, + 152, + 555, + 196 + ], + "type": "text", + "content": " by default for more balanced performance." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": "Searching the best layer for semantic regularization. We search " + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": ", the layer's index in the Transformer decoder before intermediate features are extracted to calculate semantic regularization in Eq. 1. As shown in Tab. 5, varying " + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": " presents a trade-off between gFID and the Lin Acc. for AR Probing. 
Smaller " + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": " means stricter regularization for the latent space so that the downstream generation models learn better representation. However, smaller " + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": " also sacrifices generation quality. We choose " + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "inline_equation", + "content": "l = 3" + }, + { + "bbox": [ + 313, + 217, + 555, + 338 + ], + "type": "text", + "content": " for a more balanced rFID, gFID, and linear probing accuracy for all tokenizers." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 339, + 556, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 339, + 556, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 339, + 556, + 425 + ], + "type": "text", + "content": "Exploring pretrained semantic encoder choices. We compare CLIP (DFN) [16, 46], SigLIP-400M [72] and DINOv2-B [43] as the source of semantic regularization for S-B tokenizers. As shown in Tab. 6, utilizing DINOv2-B as the semantic encoder for regularization produces the best tokenizer for reconstruction, downstream class conditional generation and representation quality." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "content": "Exploring weights for semantic regularization. 
We study the effects of different regularization weights " + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "content": " (Eq. 2), from 0.25 to 1.00. As shown in Tab. 7, a large " + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "content": " (0.75, 1.00) will damage the reconstruction quality but benefits the linear probing accuracy, whereas smaller " + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "content": " (0.25) results in suboptimal rFID and linear probing accuracy. We choose the more balanced " + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "inline_equation", + "content": "\\lambda = 0.5" + }, + { + "bbox": [ + 313, + 426, + 556, + 510 + ], + "type": "text", + "content": " as a default for all tokenizers." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 521, + 388, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 388, + 533 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 388, + 533 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 540, + 556, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 556, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 556, + 685 + ], + "type": "text", + "content": "In this work, we study and address the reconstruction vs. generation dilemma for scaling visual tokenizers. We identify that the dilemma stems from increasing latent space complexity in larger tokenizers. 
We propose semantic regularization to effectively regularize the tokenizer latent space by injecting pre-trained representations to align with tokenizer features in training. The semantic regularization, together with several key practices we explored, lead to the first 3B tokenizer, GigaTok, that achieves state-of-the-art reconstruction, downstream AR generation, and downstream AR representation quality. Please refer to discussions on limitations and future work in supplementary materials." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 683, + 295, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 683, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 683, + 295, + 714 + ], + "type": "text", + "content": "REPA [71] achieves better representation by directly distilling pretrained representations to the generation model, which is not a fair comparison with ours as we do not leverage the supervision for AR training." + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 153, + 85 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 294, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 294, + 114 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 294, + 114 + ], + "type": "text", + "content": "This work is partially supported by the National Nature Science Foundation of China (No. 62402406)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 115, + 294, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 115, + 294, + 152 + ], + "spans": [ + { + "bbox": [ + 55, + 115, + 294, + 152 + ], + "type": "text", + "content": "The authors also sincerely thank Qihang Yu and Liang-Chieh Chen for their valuable discussions during the development of GigaTok." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 160, + 115, + 173 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 160, + 115, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 160, + 115, + 173 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 180, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 180, + 294, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 180, + 294, + 202 + ], + "spans": [ + { + "bbox": [ + 61, + 180, + 294, + 202 + ], + "type": "text", + "content": "[1] stabilityyai/sd-vae-ft-ema. https://huggingface.co/stabilityyai/sd-vae-ft-ema, 2023.7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 203, + 295, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 203, + 295, + 258 + ], + "spans": [ + { + "bbox": [ + 62, + 203, + 295, + 258 + ], + "type": "text", + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. 
1" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 259, + 295, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 259, + 295, + 313 + ], + "spans": [ + { + "bbox": [ + 62, + 259, + 295, + 313 + ], + "type": "text", + "content": "[3] Roman Bachmann, Jesse Allardice, David Mizrahi, Enrico Fini, Oğuzhan Fatih Kar, Elmira Amirloo, Alaaeldin El-Nouby, Amir Zamir, and Afshin Dehghan. Flextok: Resampling images into 1d token sequences of flexible length. arXiv preprint arXiv:2502.13967, 2025. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 315, + 295, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 315, + 295, + 369 + ], + "spans": [ + { + "bbox": [ + 62, + 315, + 295, + 369 + ], + "type": "text", + "content": "[4] Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu. All are worth words: A vit backbone for diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 22669-22679, 2023. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 370, + 295, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 370, + 295, + 423 + ], + "spans": [ + { + "bbox": [ + 62, + 370, + 295, + 423 + ], + "type": "text", + "content": "[5] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 425, + 295, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 425, + 295, + 479 + ], + "spans": [ + { + "bbox": [ + 62, + 425, + 295, + 479 + ], + "type": "text", + "content": "[6] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 
End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 4, 5, 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 480, + 295, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 480, + 295, + 535 + ], + "spans": [ + { + "bbox": [ + 62, + 480, + 295, + 535 + ], + "type": "text", + "content": "[7] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 2, 7, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 536, + 295, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 536, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 62, + 536, + 295, + 590 + ], + "type": "text", + "content": "[8] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2, 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 591, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 591, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 62, + 591, + 295, + 645 + ], + "type": "text", + "content": "[9] Hao Chen, Ze Wang, Xiang Li, Xineng Sun, Fangyi Chen, Jiang Liu, Jindong Wang, Bhiksha Raj, Zicheng Liu, and Emad Barsoum. Softvq-vae: Efficient 1-dimensional continuous tokenizer. arXiv preprint arXiv:2412.10958, 2024. 
3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 647, + 295, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 295, + 691 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 295, + 691 + ], + "type": "text", + "content": "[10] Hao Chen, Yujin Han, Fangyi Chen, Xiang Li, Yidong Wang, Jindong Wang, Ze Wang, Zicheng Liu, Difan Zou, and Bhiksha Raj. Masked autoencoders are effective tokenizers for diffusion models. arXiv preprint arXiv:2502.03444, 2025. 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "text", + "content": "[11] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pre" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 714 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 95 + ], + "type": "text", + "content": "training from pixels. In International conference on machine learning, pages 1691-1703. PMLR, 2020. 3" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 150 + ], + "type": "text", + "content": "[12] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Januspro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025. 
1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 152, + 553, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 185 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 185 + ], + "type": "text", + "content": "[13] Alexey Dosovitskiy. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 4, 5" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 186, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 186, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 317, + 186, + 553, + 239 + ], + "type": "text", + "content": "[14] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 241, + 553, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 241, + 553, + 296 + ], + "spans": [ + { + "bbox": [ + 316, + 241, + 553, + 296 + ], + "type": "text", + "content": "[15] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12873-12883, 2021. 1, 2, 4, 5, 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "spans": [ + { + "bbox": [ + 316, + 298, + 553, + 331 + ], + "type": "text", + "content": "[16] Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. arXiv preprint arXiv:2309.17425, 2023. 
8" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 332, + 553, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 332, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 332, + 553, + 376 + ], + "type": "text", + "content": "[17] Christopher Fifty, Ronald G Junkins, Dennis Duan, Aniketh Iger, Jerry W Liu, Ehsan Amid, Sebastian Thrun, and Christopher Ré. Restructuring vector quantization with the rotation trick. arXiv preprint arXiv:2410.06424, 2024. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 377, + 553, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 377, + 553, + 410 + ], + "spans": [ + { + "bbox": [ + 316, + 377, + 553, + 410 + ], + "type": "text", + "content": "[18] Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 411, + 553, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 411, + 553, + 465 + ], + "spans": [ + { + "bbox": [ + 317, + 411, + 553, + 465 + ], + "type": "text", + "content": "[19] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 468, + 553, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 468, + 553, + 511 + ], + "spans": [ + { + "bbox": [ + 317, + 468, + 553, + 511 + ], + "type": "text", + "content": "[20] Alexander Hagiéle, Elie Bakouch, Atli Kosson, Loubna Ben Allal, Leandro Von Werra, and Martin Jaggi. Scaling laws and compute-optimal training beyond fixed training durations. arXiv preprint arXiv:2405.18392, 2024. 
1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 513, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 513, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 317, + 513, + 553, + 567 + ], + "type": "text", + "content": "[21] Philippe Hansen-Estruch, David Yan, Ching-Yao Chung, Orr Zohar, Jialiang Wang, Tingbo Hou, Tao Xu, Sriram Vishwanath, Peter Vajda, and Xinlei Chen. Learnings from scaling visual tokenizers for reconstruction and generation. arXiv preprint arXiv:2501.09755, 2025. 1, 4" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 568, + 553, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 568, + 553, + 622 + ], + "spans": [ + { + "bbox": [ + 317, + 568, + 553, + 622 + ], + "type": "text", + "content": "[22] Shaozhe Hao, Xuantong Liu, Xianbiao Qi, Shihao Zhao, Bojia Zi, Rong Xiao, Kai Han, and Kwan-Yee K Wong. Bigrr: Harnessing binary latent codes for image generation and improved visual representation capabilities. arXiv preprint arXiv:2410.14672, 2024. 7" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 678 + ], + "type": "text", + "content": "[23] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 16000-16009, 2022. 2, 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 316, + 680, + 553, + 714 + ], + "type": "text", + "content": "[24] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. 
Gans trained by a two time-scale update rule converge to a local nash equilib-" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 77, + 73, + 294, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 73, + 294, + 94 + ], + "spans": [ + { + "bbox": [ + 77, + 73, + 294, + 94 + ], + "type": "text", + "content": "rium. Advances in neural information processing systems, 30, 2017. 3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "text", + "content": "[25] Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395, 2024. 1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 294, + 206 + ], + "type": "text", + "content": "[26] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1125-1134, 2017. 
5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 209, + 294, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 263 + ], + "type": "text", + "content": "[27] Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 694-711. Springer, 2016. 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 264, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 294, + 286 + ], + "type": "text", + "content": "[28] Diederik P Kingma. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013. 2, 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 288, + 294, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 288, + 294, + 320 + ], + "spans": [ + { + "bbox": [ + 56, + 288, + 294, + 320 + ], + "type": "text", + "content": "[29] Diederik P Kingma, Max Welling, et al. An introduction to variational autoencoders. Foundations and Trends® in Machine Learning, 12(4):307-392, 2019. 4" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 322, + 294, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 322, + 294, + 452 + ], + "spans": [ + { + "bbox": [ + 56, + 322, + 294, + 452 + ], + "type": "text", + "content": "[30] Dan Kondratyuk, Lijun Yu, Xiuye Gu, Jose Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Joshua V. 
Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation. In Proceedings of the 41st International Conference on Machine Learning, 2024. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 455, + 294, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 455, + 294, + 508 + ], + "spans": [ + { + "bbox": [ + 56, + 455, + 294, + 508 + ], + "type": "text", + "content": "[31] Tuomas Kynkänniemi, Miika Aittala, Tero Karras, Samuli Laine, Timo Aila, and Jaakko Lehtinen. Applying guidance in a limited interval improves sample and distribution quality in diffusion models. arXiv preprint arXiv:2404.07724, 2024. 1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 510, + 294, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 510, + 294, + 543 + ], + "spans": [ + { + "bbox": [ + 56, + 510, + 294, + 543 + ], + "type": "text", + "content": "[32] Yann LeCun, Yoshua Bengio, et al. Convolutional networks for images, speech, and time series. The handbook of brain theory and neural networks, 3361(10):1995, 1995. 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 294, + 599 + ], + "type": "text", + "content": "[33] Doyup Lee, Chiheon Kim, Saehoon Kim, Minsu Cho, and Wook-Shin Han. Autoregressive image generation using residual quantization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11523-11532, 2022. 
2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 601, + 294, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 601, + 294, + 655 + ], + "spans": [ + { + "bbox": [ + 56, + 601, + 294, + 655 + ], + "type": "text", + "content": "[34] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730-19742. PMLR, 2023. 4, 5, 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 657, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 657, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 657, + 294, + 689 + ], + "type": "text", + "content": "[35] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024. 7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "text", + "content": "[36] Xiang Li, Kai Qiu, Hao Chen, Jason Kuen, Jiquiang Gu, Bhiksha Raj, and Zhe Lin. Imagefolder: Autoregres" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "spans": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "type": "text", + "content": "sive image generation with folded tokens. arXiv preprint arXiv:2410.01756, 2024. 
2, 3, 7, 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 140 + ], + "type": "text", + "content": "[37] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. 1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 141, + 553, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 141, + 553, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 141, + 553, + 195 + ], + "type": "text", + "content": "[38] Dongyang Liu, Shitian Zhao, Le Zhuo, Weifeng Lin, Yu Qiao, Hongsheng Li, and Peng Gao. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 198, + 553, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 229 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 229 + ], + "type": "text", + "content": "[39] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 232, + 553, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 232, + 553, + 275 + ], + "spans": [ + { + "bbox": [ + 316, + 232, + 553, + 275 + ], + "type": "text", + "content": "[40] Zhuoyan Luo, Fengyuan Shi, Yixiao Ge, Yujiu Yang, Limin Wang, and Ying Shan. Open-magvit2: An open-source project toward democratizing auto-regressive visual generation. arXiv preprint arXiv:2409.04410, 2024. 
2, 4, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 277, + 553, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 277, + 553, + 320 + ], + "spans": [ + { + "bbox": [ + 316, + 277, + 553, + 320 + ], + "type": "text", + "content": "[41] Chuofan Ma, Yi Jiang, Junfeng Wu, Jihan Yang, Xin Yu, Zehuan Yuan, Bingyue Peng, and Xiaojuan Qi. Unitok: A unified tokenizer for visual generation and understanding. arXiv preprint arXiv:2502.20321, 2025. 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 322, + 553, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 322, + 553, + 376 + ], + "spans": [ + { + "bbox": [ + 316, + 322, + 553, + 376 + ], + "type": "text", + "content": "[42] Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie. Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers. In European Conference on Computer Vision, pages 23-40. Springer, 2024. 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 378, + 553, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 378, + 553, + 433 + ], + "spans": [ + { + "bbox": [ + 316, + 378, + 553, + 433 + ], + "type": "text", + "content": "[43] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193, 2023. 2, 4, 8, 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 434, + 553, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 434, + 553, + 477 + ], + "spans": [ + { + "bbox": [ + 316, + 434, + 553, + 477 + ], + "type": "text", + "content": "[44] William Peebles and Saining Xie. Scalable diffusion models with transformers. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023. 7, 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 479, + 553, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 479, + 553, + 533 + ], + "spans": [ + { + "bbox": [ + 316, + 479, + 553, + 533 + ], + "type": "text", + "content": "[45] Liao Qu, Huichao Zhang, Yiheng Liu, Xu Wang, Yi Jiang, Yiming Gao, Hu Ye, Daniel K Du, Zehuan Yuan, and Xinglong Wu. Tokenflow: Unified image tokenizer for multimodal understanding and generation. arXiv preprint arXiv:2412.03069, 2024. 1" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 535, + 553, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 535, + 553, + 599 + ], + "spans": [ + { + "bbox": [ + 316, + 535, + 553, + 599 + ], + "type": "text", + "content": "[46] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 2, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 601, + 555, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 601, + 555, + 657 + ], + "spans": [ + { + "bbox": [ + 316, + 601, + 555, + 657 + ], + "type": "text", + "content": "[47] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 
7, 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 658, + 555, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 555, + 713 + ], + "type": "text", + "content": "[48] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115:211-252, 2015. 3" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[49] Fengyuan Shi, Zhuoyan Luo, Yixiao Ge, Yujiu Yang, Ying Shan, and Limin Wang. Taming scalable visual tokenizer for autoregressive image generation. arXiv preprint arXiv:2412.02692, 2024. 2, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 160 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 160 + ], + "type": "text", + "content": "[50] Peize Sun, Yi Jiang, Shoufa Chen, Shilong Zhang, Bingyue Peng, Ping Luo, and Zehuan Yuan. Autoregressive model beats diffusion: Llama for scalable image generation. 2024. 
1, 2, 3, 4, 5, 7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 162, + 294, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 162, + 294, + 215 + ], + "spans": [ + { + "bbox": [ + 56, + 162, + 294, + 215 + ], + "type": "text", + "content": "[51] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 217, + 294, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 217, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 56, + 217, + 294, + 270 + ], + "type": "text", + "content": "[52] Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 271, + 294, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 294, + 326 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 294, + 326 + ], + "type": "text", + "content": "[53] Keyu Tian, Yi Jiang, Zehuan Yuan, BINGYUE PENG, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. 
2, 7, 1, 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "spans": [ + { + "bbox": [ + 56, + 327, + 294, + 380 + ], + "type": "text", + "content": "[54] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 1, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 56, + 382, + 294, + 435 + ], + "type": "text", + "content": "[55] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 1" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 437, + 294, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 437, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 56, + 437, + 294, + 469 + ], + "type": "text", + "content": "[56] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 471, + 294, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 471, + 294, + 491 + ], + "spans": [ + { + "bbox": [ + 56, + 471, + 294, + 491 + ], + "type": "text", + "content": "[57] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 
4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 493, + 294, + 525 + ], + "type": "text", + "content": "[58] Hanyu Wang, Saksham Suri, Yixuan Ren, Hao Chen, and Abhinav Shrivastava. Larp: Tokenizing videos with a learned autoregressive generative prior. In ICLR, 2025. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 526, + 294, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 526, + 294, + 568 + ], + "spans": [ + { + "bbox": [ + 56, + 526, + 294, + 568 + ], + "type": "text", + "content": "[59] Luting Wang, Yang Zhao, Zijian Zhang, Jiashi Feng, Si Liu, and Bingyi Kang. Image understanding makes for a good tokenizer for image generation. arXiv preprint arXiv:2411.04406, 2024. 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 570, + 294, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 570, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 56, + 570, + 294, + 613 + ], + "type": "text", + "content": "[60] Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024. 1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 615, + 294, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 615, + 294, + 657 + ], + "spans": [ + { + "bbox": [ + 56, + 615, + 294, + 657 + ], + "type": "text", + "content": "[61] Mark Weber, Lijun Yu, Qihang Yu, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. Maskbit: Embedding-free image generation via bit tokens. arXiv preprint arXiv:2409.16211, 2024. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 712 + ], + "type": "text", + "content": "[62] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024. 1" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 554, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 316, + 72, + 554, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 554, + 126 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 554, + 126 + ], + "type": "text", + "content": "[63] Yecheng Wu, Zhuoyang Zhang, Junyu Chen, Haotian Tang, Dacheng Li, Yunhao Fang, Ligeng Zhu, Enze Xie, Hongxu Yin, Li Yi, et al. Vila-u: a unified foundation model integrating visual understanding and generation. arXiv preprint arXiv:2409.04429, 2024. 3" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 129, + 554, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 554, + 171 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 554, + 171 + ], + "type": "text", + "content": "[64] Wanghan Xu, Xiaoyu Yue, Zidong Wang, Yao Teng, Wenlong Zhang, Xihui Liu, Luping Zhou, Wanli Ouyang, and Lei Bai. Exploring representation-aligned latent space for better generation. arXiv preprint arXiv:2502.00359, 2025." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 173, + 554, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 173, + 554, + 205 + ], + "spans": [ + { + "bbox": [ + 316, + 173, + 554, + 205 + ], + "type": "text", + "content": "[65] Jingfeng Yao and Xinggang Wang. Reconstruction vs. generation: Taming optimization dilemma in latent diffusion models. arXiv preprint arXiv:2501.01423, 2025. 3, 7, 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 206, + 554, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 206, + 554, + 258 + ], + "spans": [ + { + "bbox": [ + 316, + 206, + 554, + 258 + ], + "type": "text", + "content": "[66] Jiahui Yu, Xin Li, Jing Yu Koh, Han Zhang, Ruoming Pang, James Qin, Alexander Ku, Yuanzhong Xu, Jason Baldridge, and Yonghui Wu. Vector-quantized image modeling with improved vqgan. arXiv preprint arXiv:2110.04627, 2021. 1, 2, 5, 7" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 261, + 554, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 261, + 554, + 326 + ], + "spans": [ + { + "bbox": [ + 316, + 261, + 554, + 326 + ], + "type": "text", + "content": "[67] Lijun Yu, Yong Cheng, Kihyuk Sohn, José Lezama, Han Zhang, Huiwen Chang, Alexander G Hauptmann, Ming-Hsuan Yang, Yuan Hao, Irfan Essa, et al. Magvit: Masked generative video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10459-10469, 2023. 5, 1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 327, + 554, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 327, + 554, + 392 + ], + "spans": [ + { + "bbox": [ + 316, + 327, + 554, + 392 + ], + "type": "text", + "content": "[68] Lijun Yu, Yong Cheng, Zhiruo Wang, Vivek Kumar, Wolfgang Macherey, Yanping Huang, David Ross, Irfan Essa, Yonatan Bisk, Ming-Hsuan Yang, et al. 
Spae: Semantic pyramid autoencoder for multimodal generation with frozen llms. Advances in Neural Information Processing Systems, 36:52692-52704, 2023. 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 393, + 554, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 393, + 554, + 448 + ], + "spans": [ + { + "bbox": [ + 316, + 393, + 554, + 448 + ], + "type": "text", + "content": "[69] Lijun Yu, José Lezama, Nitesh B Gundavarapu, Luca Versari, Kihyuk Sohn, David Minnen, Yong Cheng, Vighnesh Birodkar, Agrim Gupta, Xiuye Gu, et al. Language model beats diffusion-tokenizer is key to visual generation. arXiv preprint arXiv:2310.05737, 2023. 1, 2, 4, 5" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 449, + 554, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 449, + 554, + 491 + ], + "spans": [ + { + "bbox": [ + 316, + 449, + 554, + 491 + ], + "type": "text", + "content": "[70] Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen. An image is worth 32 tokens for reconstruction and generation. arXiv preprint arXiv:2406.07550, 2024. 2, 7, 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 493, + 554, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 493, + 554, + 546 + ], + "spans": [ + { + "bbox": [ + 316, + 493, + 554, + 546 + ], + "type": "text", + "content": "[71] Sihyun Yu, Sangkyung Kwak, Huiwon Jang, Jongheon Jeong, Jonathan Huang, Jinwoo Shin, and Saining Xie. Representation alignment for generation: Training diffusion transformers is easier than you think. arXiv preprint arXiv:2410.06940, 2024. 
3, 7, 8, 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 548, + 554, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 548, + 554, + 591 + ], + "spans": [ + { + "bbox": [ + 316, + 548, + 554, + 591 + ], + "type": "text", + "content": "[72] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023. 2, 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 593, + 554, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 593, + 554, + 656 + ], + "spans": [ + { + "bbox": [ + 316, + 593, + 554, + 656 + ], + "type": "text", + "content": "[73] Baoquan Zhang, Huaibin Wang, Chuyao Luo, Xutao Li, Guotao Liang, Yunming Ye, Xiaochen Qi, and Yao He. Codebook transfer with part-of-speech for vector-quantized image modeling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7757–7766, 2024. 3" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 658, + 554, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 554, + 712 + ], + "type": "text", + "content": "[74] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 
5" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 295, + 184 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 106 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 106 + ], + "type": "text", + "content": "[75] Yue Zhao, Yuanjun Xiong, and Philipp Krajhenbuhl. Image and video tokenization with binary spherical quantization. arXiv preprint arXiv:2406.07548, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 107, + 295, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 107, + 295, + 140 + ], + "spans": [ + { + "bbox": [ + 56, + 107, + 295, + 140 + ], + "type": "text", + "content": "[76] Lei Zhu, Fangyun Wei, Yanye Lu, and Dong Chen. Scaling the codebook size of vqgan to 100,000 with a utilization rate of " + }, + { + "bbox": [ + 56, + 107, + 295, + 140 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 56, + 107, + 295, + 140 + ], + "type": "text", + "content": ". arXiv preprint arXiv:2406.11837, 2024. 2, 3, 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 141, + 295, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 141, + 295, + 184 + ], + "spans": [ + { + "bbox": [ + 57, + 141, + 295, + 184 + ], + "type": "text", + "content": "[77] Yongxin Zhu, Bocheng Li, Hang Zhang, Xin Li, Linli Xu, and Lidong Bing. Stabilize the latent space for image autoregressive modeling: A unified perspective. arXiv preprint arXiv:2410.12490, 2024. 
3" + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 122, + 68, + 489, + 103 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 68, + 489, + 103 + ], + "spans": [ + { + "bbox": [ + 122, + 68, + 489, + 103 + ], + "type": "text", + "content": "GigaTok: Scaling Visual Tokenizers to 3 Billion Parameters for Autoregressive Image Generation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "spans": [ + { + "bbox": [ + 233, + 112, + 376, + 129 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 141, + 225, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 141, + 225, + 154 + ], + "spans": [ + { + "bbox": [ + 55, + 141, + 225, + 154 + ], + "type": "text", + "content": "A. Limitations and Future Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 162, + 296, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 162, + 296, + 318 + ], + "spans": [ + { + "bbox": [ + 55, + 162, + 296, + 318 + ], + "type": "text", + "content": "This study primarily focuses on scaling tokenizers for class-conditional image generation. While we have demonstrated the effectiveness of GigaTok for downstream class-conditional generation, expanding the scope to include text-conditional image generation or video generation remains an open avenue for future work. Additionally, unlike CNN-based 2D tokenizers, 1D Transformer-based tokenizers are not directly applicable to multiple resolutions without additional training adjustments. This challenge presents an important direction for further exploration. 
Besides scaling the model sizes of tokenizers, the effect of scaling training data, codebook dimension and codebook size for downstream autoregressive generation are left for future research." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 330, + 227, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 330, + 227, + 342 + ], + "spans": [ + { + "bbox": [ + 55, + 330, + 227, + 342 + ], + "type": "text", + "content": "B. Configurations for AR models" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 91, + 360, + 261, + 429 + ], + "blocks": [ + { + "bbox": [ + 91, + 360, + 261, + 429 + ], + "lines": [ + { + "bbox": [ + 91, + 360, + 261, + 429 + ], + "spans": [ + { + "bbox": [ + 91, + 360, + 261, + 429 + ], + "type": "table", + "html": "
SizeParams.BlocksHeadsDim.
B111M1212768
L343M24161024
XL775M36201280
XXL1.4B48241536
", + "image_path": "6bae227554a03c1775492f7d81adf5e2f053dba39d655da7c38297c685182938.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 437, + 295, + 459 + ], + "lines": [ + { + "bbox": [ + 55, + 437, + 295, + 459 + ], + "spans": [ + { + "bbox": [ + 55, + 437, + 295, + 459 + ], + "type": "text", + "content": "Table 8. Architectures of the LLamaGen models in our experiments." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "text", + "content": "AR model training. We scale up the training of downstream Llama-style [50, 54] AR models to compare generation performance with other models. For model training, we use WSD learning rate scheduler [20, 25] with " + }, + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "text", + "content": " base learning rate, 0.2 decay ratio and 1 epoch warm-up. We do not use AdaLN [44, 53] as it is specific for class-conditional generation. We use a batch size of 256 for training the B, L and XL models and a 512 batch size for training the XXL model. Our AR models are trained for 300 epochs on the " + }, + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 474, + 295, + 593 + ], + "type": "text", + "content": " ImageNet training set." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "content": "CFG for gFID. 
Since gFID of GPT models can be largely affected by classifier free guidance (CFG) [47, 50] and often has an optimal CFG [50], for fair comparison, we search the optimal CFG using zero-order search with a step of 0.25 and use the lowest gFID as the final value. For AR Probing, we use constant CFG scheduling for simplicity. For system-level comparison, we use a step function for CFG scheduling inspired by [31]. Specifically, the AR models predict the first " + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "content": " tokens without CFG, i.e., " + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{CFG} = 1" + }, + { + "bbox": [ + 55, + 594, + 295, + 714 + ], + "type": "text", + "content": " for better diversity, and use CFG for the remaining tokens" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 141, + 555, + 312 + ], + "blocks": [ + { + "bbox": [ + 317, + 141, + 555, + 312 + ], + "lines": [ + { + "bbox": [ + 317, + 141, + 555, + 312 + ], + "spans": [ + { + "bbox": [ + 317, + 141, + 555, + 312 + ], + "type": "image", + "image_path": "0aabb2d424621a2772efa50fc41a2ba598fc16ac33f0b541e7ec1e13c62263ba.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 327, + 323, + 541, + 335 + ], + "lines": [ + { + "bbox": [ + 327, + 323, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 327, + 323, + 541, + 335 + ], + "type": "text", + "content": "Figure 10. The architecture of GigaTok with Q-Former." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 316, + 345, + 553, + 437 + ], + "blocks": [ + { + "bbox": [ + 316, + 345, + 553, + 437 + ], + "lines": [ + { + "bbox": [ + 316, + 345, + 553, + 437 + ], + "spans": [ + { + "bbox": [ + 316, + 345, + 553, + 437 + ], + "type": "image", + "image_path": "55b90d7df7255b979b3f0cfa226e89b47598a89009decc9c68523c8829374359.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 317, + 446, + 550, + 458 + ], + "lines": [ + { + "bbox": [ + 317, + 446, + 550, + 458 + ], + "spans": [ + { + "bbox": [ + 317, + 446, + 550, + 458 + ], + "type": "text", + "content": "Figure 11. Initialization of 1D queries in Q-Former modules." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 479, + 554, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 479, + 554, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 479, + 554, + 503 + ], + "type": "text", + "content": "for better visual quality. Interestingly, we find that the 1.4B LlamaGen model achieves the best gFID without CFG." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 514, + 507, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 507, + 528 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 507, + 528 + ], + "type": "text", + "content": "C. Detailed GigaTok Implementation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 534, + 479, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 534, + 479, + 545 + ], + "spans": [ + { + "bbox": [ + 313, + 534, + 479, + 545 + ], + "type": "text", + "content": "Please refer to Tab. 9 for training details." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "text", + "content": "Q-Fomrer in GigaTok. GigaTok utilizes Q-Former [6, 34] to build 1D tokenizers, as shown in Fig. 10. For Q-Former encoder in GigaTok, we initialize the 1D queries initialized from the 2D input features of the CNN encoder using a multi-level average pooling strategy, as shown in Fig. 11. Specifically, for the same 2D input features, we spatially divide them with different granularity at different levels, and perform average pooling for every divided region at each level. The pooled features are flattened and concatenated from level 0 to the last level. Therefore, a 1D token sequence with " + }, + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "inline_equation", + "content": "2^{L}" + }, + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "text", + "content": " length can be initialized with " + }, + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 545, + 554, + 700 + ], + "type": "text", + "content": " levels from 2D input features. At the decoding stage, the 2D queries are all initialized from the first 1D latent feature." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 701, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 701, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 701, + 553, + 714 + ], + "type": "text", + "content": "Entropy Loss for VQ Tokenizers. 
While entropy loss [67," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 161, + 70, + 451, + 346 + ], + "blocks": [ + { + "bbox": [ + 161, + 70, + 451, + 346 + ], + "lines": [ + { + "bbox": [ + 161, + 70, + 451, + 346 + ], + "spans": [ + { + "bbox": [ + 161, + 70, + 451, + 346 + ], + "type": "table", + "html": "
ConfigurationS-SS-BS-LB-LXL-XXL
Q-Former Encoder depth6661236
Q-Former Encoder heads8881220
Q-Former Encoder dim.5125125127681280
Q-Former Decoder depth612242448
Q-Former Decoder heads.812161624
Q-Former Decoder dim.512768102410241536
Params (M)1362325336222896
Codebook size16384
Codebook dimension8
#Tokens256
Training epochs100200200200300
Batch size128128256256256
Alignment Layer l3
Learning rate scheduleCosine Decay
Base learning rate\\( 1 \\times 10^{-4} \\)
Minimum learning rate\\( 1 \\times 10^{-5} \\)
LR warm-up iterations00005000
OptimizerAdamW[39]
Opt. momentum\\( \\beta_1 = 0.9, \\beta_2 = 0.95 \\)
Entropy Loss weight0000\\( 5 \\times 10^{-3} \\)
", + "image_path": "a2be56996275f976cf1f457e4c8dd1af6d6f1e9a7b132e726a12ef2c4f015402.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 198, + 353, + 414, + 365 + ], + "lines": [ + { + "bbox": [ + 198, + 353, + 414, + 365 + ], + "spans": [ + { + "bbox": [ + 198, + 353, + 414, + 365 + ], + "type": "text", + "content": "Table 9. GigaTok configuration and default training details" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "spans": [ + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": "[69] is discussed for LFQ [69], its application to VQ tokenizers is less commonly explained. We provide a detailed derivation of the entropy loss specifically for VQ tokenizers. Mathematically, for quantization process from continuous vector " + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{z} \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": " to quantized vector " + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{z}} = \\mathbf{c}_i \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_i" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": "-th codebook vector from codebook " + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{C} 
\\in \\mathbb{R}^{N \\times D}" + }, + { + "bbox": [ + 55, + 384, + 295, + 481 + ], + "type": "text", + "content": ", we assume this process is statistical and follows the following distribution:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 97, + 490, + 295, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 490, + 295, + 506 + ], + "spans": [ + { + "bbox": [ + 97, + 490, + 295, + 506 + ], + "type": "interline_equation", + "content": "p (\\hat {\\mathbf {z}} = \\mathbf {c} _ {i} | \\mathbf {z}) \\triangleq \\operatorname {s o f t m a x} (- l _ {2} (\\mathbf {z}, \\mathbf {C})) [ i ] \\tag {4}", + "image_path": "e84d8dee75edb0b5e1b5359de086cf50cb99bf3269dd86ed1d8aeaf34ef19742.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "spans": [ + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "l_{2}(\\mathbf{z},\\mathbf{C})\\in \\mathbb{R}^{N}" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": " distance between " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": " and all the codebook vectors. 
Then, minimization of the quantization error can be partially achieved by minimizing the expectation of entropy " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{\\mathbf{z}}[H(\\hat{\\mathbf{z}} |\\mathbf{z})]" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": ", which can be understood as maximizing the prediction confidence for " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "p(\\hat{\\mathbf{z}} |\\mathbf{z})" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": ". To encourage higher codebook utilization, we aim to make the average appearance probability of codebook vectors more uniform. This is achieved by maximizing the entropy " + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "inline_equation", + "content": "H(\\hat{\\mathbf{z}})" + }, + { + "bbox": [ + 55, + 513, + 296, + 634 + ], + "type": "text", + "content": ". 
Therefore, the optimization of the two entropy terms leads to the final entropy loss equation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 643, + 295, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 643, + 295, + 658 + ], + "spans": [ + { + "bbox": [ + 111, + 643, + 295, + 658 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {e n t r o p y}} = \\mathbb {E} _ {\\mathbf {z}} [ H (\\hat {\\mathbf {z}} | \\mathbf {z}) ] - H (\\hat {\\mathbf {z}}) \\tag {5}", + "image_path": "45da4b40e08de744a3a50b51d9ab95bb8b8df14ed43b4deffc897ea08a8800ef.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": "In practice, to calculate " + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "inline_equation", + "content": "H(\\hat{\\mathbf{z}})" + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": ", we estimate " + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "inline_equation", + "content": "p(\\hat{\\mathbf{z}} = \\mathbf{c}_i)" + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "inline_equation", + "content": "p(\\hat{\\mathbf{z}} = \\mathbf{c}_i) = \\mathbb{E}_{\\mathbf{z}}[p(\\hat{\\mathbf{z}} = \\mathbf{c}_i|\\mathbf{z})]" + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": ". Note that entropy loss is not our contribution. We only provide a detailed definition of entropy loss in VQ scenarios for better understanding." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 384, + 555, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 555, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 555, + 613 + ], + "type": "text", + "content": "Additional implementation details. To stabilize the training of our tokenizer with a hybrid architecture, we initially use a shortcut feature reconstruction trick at the first 15k iterations of the tokenizer training. But we later found that this trick can be replaced with a simple 1-epoch learning rate warmup combined with entropy loss [15, 69]. Specifically for this trick, we additionally give the output feature of the CNN encoder to the CNN decoder directly to be trained for reconstruction, and also align the output feature of the Transformer decoder to the output feature of the CNN encoder, besides the original training objectives. Note that this strategy is complex and can even hinder performance for XL-XXL tokenizers. We recommend using the learning rate warmup combined with entropy loss [15, 69] instead, for both XL-XXL tokenizer and the smaller ones. Additionally, we utilize the rotation trick [17] for all tokenizers, though we observe its effect on performance to be limited for our tokenizer. The implementation of the semantic regularization is partially inspired by REPA [71]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 621, + 522, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 621, + 522, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 621, + 522, + 635 + ], + "type": "text", + "content": "D. 
Full Evaluation Results and Analysis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 556, + 715 + ], + "type": "text", + "content": "Here we present the full evaluation results for the tokenizers and downstream AR models, as summarized in Tab. 10. We observe that scaling up visual tokenizers consistently improves the reconstruction quality across multiple metrics. Interestingly, for the 1.4B AR model, the lowest gFID is obtained without applying any CFG. This phenomenon is" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 579, + 186 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 579, + 186 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 579, + 186 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 579, + 186 + ], + "type": "table", + "html": "
TokenizerParam. rFID↓LPIPS↓PSNR↑SSIM↑AR ModelParam. gFID↓Acc.↑IS↑Precision↑Recall↑
LlamaGen-Tok. [50]72M2.19-20.790.675LlamaGen-B [50]111M5.46-193.610.830.45
GigaTok-S-S136M1.010.222620.740.670LlamaGen-B (1d) [50]111M4.0562.6240.610.810.51
GigaTok-S-B232M0.890.212120.930.677LlamaGen-B (1d) [50]111M3.8362.9233.310.830.51
GigaTok-B-L622M0.810.205921.210.685LlamaGen-B (1d) [50]111M3.2667.6221.020.810.56
LlamaGen-XXL (1d) [50]1.4B2.03*69.4238.520.800.63
GigaTok-B-L622M0.51‡0.20621.320.691LlamaGen-B (1d) [50]111M3.3367.7265.430.800.56
GigaTok-XL-XXL2.9B0.790.194721.650.699LlamaGen-B (1d) [50]111M3.1572.0224.280.820.55
LlamaGen-XXL (1d) [50]1.4B1.98*74.0256.760.810.62
", + "image_path": "c430d54c28f1d7deb48c5ced503e66bfc7c3f7e549a1d36977dcd91509812eac.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "lines": [ + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "spans": [ + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "text", + "content": "Table 10. Full results for our tokenizers and AR models on ImageNet " + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "inline_equation", + "content": "{256} \\times {256}" + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "text", + "content": " . For gFID,we present the lowest value between w/ or w/o CFG scenarios. " + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "text", + "content": " : Using frozen DINO [7] for discriminator,which largely improves rFID. " + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 55, + 195, + 555, + 218 + ], + "type": "text", + "content": " : Without classifier-free-guidance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 239, + 294, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 239, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 54, + 239, + 294, + 285 + ], + "type": "text", + "content": "also observed in the concurrent work FlexTok [3], despite significant differences between GigaTok and FlexTok. We hypothesize that semantic regularization might be the primary contributing factor for this phenomenon." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 287, + 295, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 287, + 295, + 478 + ], + "spans": [ + { + "bbox": [ + 55, + 287, + 295, + 478 + ], + "type": "text", + "content": "Discussion on Scaling and Enhancing the Discriminator. Recently, VAR [53], ImageFolder [36], and the concurrent work UniTok [41] have begun leveraging DINO-based discriminators [7, 43] to enhance tokenizer training, achieving impressive improvements in rFID scores. We have also experimented with the same DINO discriminator configuration as VAR. Our results indicate that although rFID scores improve, the downstream generation quality improvements are less significant, as detailed in Tab. 10. Furthermore, when applying the DINO discriminator to XL-XXL tokenizers, we observed that adversarial training frequently encounters instability. Specifically, a strong discriminator quickly learns to distinguish reconstructed samples, diminishing the benefits of adversarial training and leading to blurry artifacts. We leave further exploration of discriminator scaling and enhancement strategies for future work." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 490, + 277, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 490, + 277, + 502 + ], + "spans": [ + { + "bbox": [ + 55, + 490, + 277, + 502 + ], + "type": "text", + "content": "E. Training Tokenizers for More Iterations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 510, + 295, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 295, + 593 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 295, + 593 + ], + "type": "text", + "content": "While we largely resolve the reconstruction vs. generation dilemma regarding tokenizer model scaling, this challenge persists for tokenizer training duration scaling. 
To illustrate this phenomenon, we train five S-S tokenizers ranging from 40 to 120 epochs using a cosine learning rate scheduler, as detailed in Tab. 9. The results are presented in Fig. 12." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 594, + 295, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 700 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 700 + ], + "type": "text", + "content": "When extending tokenizer training iterations, reconstruction quality consistently improves. However, downstream generation quality initially improves but subsequently degrades with further increases in tokenizer training duration. Additionally, the validation loss of AR probing continuously rises with longer tokenizer training, regardless of semantic regularization. This trend suggests an increasing complexity in the tokenizer's latent space as the training duration extends." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 701, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 701, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 701, + 295, + 713 + ], + "type": "text", + "content": "We hypothesize that data scaling may alleviate this is-" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 239, + 555, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 239, + 555, + 287 + ], + "spans": [ + { + "bbox": [ + 313, + 239, + 555, + 287 + ], + "type": "text", + "content": "sue, and leave it for future exploration. In practice, allocating computational resources toward model scaling rather than extended training duration may yield better tokenizer performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 300, + 533, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 300, + 533, + 313 + ], + "spans": [ + { + "bbox": [ + 313, + 300, + 533, + 313 + ], + "type": "text", + "content": "F. 
Linear Probing Accuracy of Tokenizers" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 320, + 555, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 320, + 555, + 499 + ], + "spans": [ + { + "bbox": [ + 313, + 320, + 555, + 499 + ], + "type": "text", + "content": "We show that the linear probing accuracy of the tokenizer encoders may not necessarily indicate the performance of downstream AR models. We utilize the intermediate checkpoints during the training of B-L and XL-XXL tokenizers for evaluation. As shown in Fig. 13, the XL-XXL tokenizer encoder presents an overfitting trend in terms of tokenizer encoder linear probing accuracy. However, this overfitting trend is not reflected in AR Probing linear probing accuracy or gFID. Therefore, the linear probing accuracy of the tokenizer encoders may not be a good indicator of downstream model performance. Similarly, a concurrent work UniTok [41], also points out that the performance of the tokenizer encoder in terms of zero-shot ImageNet classification accuracy may not necessarily reflect the visual understanding ability of downstream LLMs trained on the tokenizer." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 500, + 554, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 500, + 554, + 584 + ], + "spans": [ + { + "bbox": [ + 313, + 500, + 554, + 584 + ], + "type": "text", + "content": "The abnormality for large tokenizers reveals that the linear probing accuracy of the tokenizer is not necessarily a good indicator for downstream generation models. Since we care more about the representation learning for downstream models than for the tokenizers, using AR Probing as a direct evaluating method is better than indirect tokenizer linear probing accuracy." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 597, + 531, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 597, + 531, + 609 + ], + "spans": [ + { + "bbox": [ + 313, + 597, + 531, + 609 + ], + "type": "text", + "content": "G. More Discussions About Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 617, + 554, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 554, + 712 + ], + "type": "text", + "content": "TiTok [70] explores the use of 1D Transformer-based tokenizers under a high compression rate setting. TiTok seminally explores the model scaling of visual tokenizers and uses larger tokenizers for higher compression rate. However, the reconstruction vs. generation dilemma for scaling tokenizers is not solved in TiTok. As a result, the best generation model in TiTok is still trained on its smallest tokenizer variant." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 59, + 88, + 145, + 182 + ], + "blocks": [ + { + "bbox": [ + 59, + 88, + 145, + 182 + ], + "lines": [ + { + "bbox": [ + 59, + 88, + 145, + 182 + ], + "spans": [ + { + "bbox": [ + 59, + 88, + 145, + 182 + ], + "type": "image", + "image_path": "e205c5e0363951781746ffb7f15f0fea7dee10370ed96939e69739e03db2597f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 194, + 555, + 226 + ], + "lines": [ + { + "bbox": [ + 55, + 194, + 555, + 226 + ], + "spans": [ + { + "bbox": [ + 55, + 194, + 555, + 226 + ], + "type": "text", + "content": "Figure 12. Training duration scaling trends of tokenizers for reconstruction, downstream generation and representation quality with and without semantic regularization. 
Note that in the last two figures, the red and blue curves correspond to different scales on the y-axis." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 152, + 88, + 242, + 182 + ], + "blocks": [ + { + "bbox": [ + 152, + 88, + 242, + 182 + ], + "lines": [ + { + "bbox": [ + 152, + 88, + 242, + 182 + ], + "spans": [ + { + "bbox": [ + 152, + 88, + 242, + 182 + ], + "type": "image", + "image_path": "b397294c7c93b5ced9f442cccae73fbd6a03a99b75c046a878da0d7cb5af83a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 245, + 88, + 354, + 182 + ], + "blocks": [ + { + "bbox": [ + 245, + 88, + 354, + 182 + ], + "lines": [ + { + "bbox": [ + 245, + 88, + 354, + 182 + ], + "spans": [ + { + "bbox": [ + 245, + 88, + 354, + 182 + ], + "type": "image", + "image_path": "a0b2c71f7aa19b9521226367fa6f7ed0a44912ef10f9158ae65713269a8c6225.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 357, + 88, + 447, + 182 + ], + "blocks": [ + { + "bbox": [ + 357, + 88, + 447, + 182 + ], + "lines": [ + { + "bbox": [ + 357, + 88, + 447, + 182 + ], + "spans": [ + { + "bbox": [ + 357, + 88, + 447, + 182 + ], + "type": "image", + "image_path": "501724ecdc813e1787bc98b3a565459f1ad00649558af51fafe3505ee36c94c5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 449, + 88, + 552, + 182 + ], + "blocks": [ + { + "bbox": [ + 449, + 88, + 552, + 182 + ], + "lines": [ + { + "bbox": [ + 449, + 88, + 552, + 182 + ], + "spans": [ + { + "bbox": [ + 449, + 88, + 552, + 182 + ], + "type": "image", + "image_path": "31b1d65e2e767d9a05af9902ebe910c062397920ef6b942299dd20cf5271bfa2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { 
+ "type": "image", + "bbox": [ + 137, + 247, + 231, + 258 + ], + "blocks": [ + { + "bbox": [ + 137, + 247, + 231, + 258 + ], + "lines": [ + { + "bbox": [ + 137, + 247, + 231, + 258 + ], + "spans": [ + { + "bbox": [ + 137, + 247, + 231, + 258 + ], + "type": "image", + "image_path": "5b9beeef0eac54b87f8240947c7f51792dba750f90fc05d1fd7ac427dc9bfe16.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 58, + 263, + 132, + 348 + ], + "blocks": [ + { + "bbox": [ + 58, + 263, + 132, + 348 + ], + "lines": [ + { + "bbox": [ + 58, + 263, + 132, + 348 + ], + "spans": [ + { + "bbox": [ + 58, + 263, + 132, + 348 + ], + "type": "image", + "image_path": "2610462c1e32867a02d8209cfa0a22885a2e20c2ae3cb320c384a615f8ad054c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 357, + 295, + 413 + ], + "lines": [ + { + "bbox": [ + 55, + 357, + 295, + 413 + ], + "spans": [ + { + "bbox": [ + 55, + 357, + 295, + 413 + ], + "type": "text", + "content": "Figure 13. The linear probing accuracy of tokenizer encoders does not necessarily reflect downstream model performance. As the training proceeds, the XL-XXL tokenizer encoder presents an overfitting trend measured by linear probing accuracy, but downstream model performances consistently improve." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 134, + 263, + 212, + 348 + ], + "blocks": [ + { + "bbox": [ + 134, + 263, + 212, + 348 + ], + "lines": [ + { + "bbox": [ + 134, + 263, + 212, + 348 + ], + "spans": [ + { + "bbox": [ + 134, + 263, + 212, + 348 + ], + "type": "image", + "image_path": "3e802de3a2022c636c39f13e07b3c7a49f1c95e42b86c55ecc2c4e86f8885e6d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 219, + 263, + 290, + 348 + ], + "blocks": [ + { + "bbox": [ + 219, + 263, + 290, + 348 + ], + "lines": [ + { + "bbox": [ + 219, + 263, + 290, + 348 + ], + "spans": [ + { + "bbox": [ + 219, + 263, + 290, + 348 + ], + "type": "image", + "image_path": "c3261129c9a4a3015e129369727cb9928d0a5508af5ef7763dbd5e72adc541d6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 436, + 295, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 436, + 295, + 579 + ], + "spans": [ + { + "bbox": [ + 55, + 436, + 295, + 579 + ], + "type": "text", + "content": "ViTok [21] is a concurrent work which has explored the effect of model scaling for VAE [28]. ViTok evaluates its VAE models in terms of both reconstruction and downstream diffusion generation performance. While having a very different setting from GigaTok, ViTok similarly finds that asymmetric design is better for VAEs. While ViTok suggests that small encoders are optimal, we point out that in our setting scaling encoders is also beneficial. Notably, the reconstruction vs. generation dilemma for scaling visual tokenizers is not solved in ViTok. We hypothesize that adding semantic regularization may similarly help solve the tokenizer scaling dilemma for VAEs, but leave it for future study." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "spans": [ + { + "bbox": [ + 55, + 581, + 295, + 688 + ], + "type": "text", + "content": "MAGVIT-v2 [69] introduces LFQ to enhance discrete tokenizers. It also introduces the entropy penalty for tokenizer training, which is shown to be important for training large-scale tokenizers in our work. Instead of tokenizer model scaling, MAGVIT-v2 focuses more on scaling the codebook size of tokenizers. While codebook dimension and codebook size are important bottlenecks for visual tokenizers, we point out that model size scaling is also an important way for improving visual tokenizers." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "ImageFolder [36] utilizes two branches for image encoding to handle high-level semantic information and low-level" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 247, + 553, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 247, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 313, + 247, + 553, + 281 + ], + "type": "text", + "content": "visual details respectively. It seminally utilizes semantic alignment to enhance the learned representation of tokenizers." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 283, + 555, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 283, + 555, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 283, + 555, + 380 + ], + "type": "text", + "content": "VA-VAE [65] tames the reconstruction vs. generation dilemma in increasing latent dimensions for continuous VAE [28, 29]. 
VA-VAE improves the reconstruction-generation Pareto Frontier by introducing vision foundation model alignment loss. In contrast, we seek continuous improvements in both reconstruction and generation by scaling tokenizers. Semantic regularization serves different purposes in the two works." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 200, + 72, + 447, + 86 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 72, + 447, + 86 + ], + "spans": [ + { + "bbox": [ + 200, + 72, + 447, + 86 + ], + "type": "text", + "content": "w/o semantic regularization w/ semantic regularization" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_content_list.json b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a4c98c7c9732d02071d9f8b2923f3328867a5a85 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_content_list.json @@ -0,0 +1,1945 @@ +[ + { + "type": "text", + "text": "AGENTREWARDBENCH: Evaluating Automatic Evaluations of Web Agent Trajectories", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xing Han Lu12 Amirhossein Kazemnejad*2", + "bbox": [ + 179, + 165, + 527, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nicholas Meade12 Arkil Patel12 Dongchan Shin2 Alejandra Zambrano2", + "bbox": [ + 179, + 181, + 766, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Karolina Stanczak12 Peter Shaw4 Christopher J. 
Pal2567 Siva Reddy1257", + "bbox": [ + 179, + 196, + 769, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Core contributor ¹McGill University ²Mila Quebec AI Institute ⁴Google DeepMind", + "bbox": [ + 179, + 213, + 810, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5Canada CIFAR AI Chair 6Polytechnique Montréal 7ServiceNow Research", + "bbox": [ + 179, + 229, + 738, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xing.han.lu@mail.mcgill.ca; siva.reddy@mila.quebec", + "bbox": [ + 179, + 243, + 586, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 292, + 539, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Web agents enable users to perform tasks on web browsers through natural language interaction. Evaluating web agents trajectories is an important problem, since it helps us determine whether the agent successfully completed the tasks. Rule-based methods are widely used for this purpose, but they are challenging to extend to new tasks and may not always recognize successful trajectories. We may achieve higher accuracy through human evaluation, but the process would be substantially slower and more expensive. Automatic evaluations with LLMs may avoid the challenges of designing new rules and manually annotating trajectories, enabling faster and cost-effective evaluation. However, it is unclear how effective they are at evaluating web agents. To this end, we propose AGENTREWARD-BENCH, the first benchmark to assess the effectiveness of LLM judges for evaluating web agents. AGENTREWARD-BENCH contains 1302 trajectories across 5 benchmarks and 4 LLMs. Each trajectory in AGENTREWARD-BENCH is reviewed by an expert, who answers questions pertaining to the success, side effects, and repetitiveness of the agent. Using our benchmark, we evaluate 12 LLM judges and find that no single LLM excels across all benchmarks. 
We also find that the rule-based evaluation used by common benchmarks tends to underreport the success rate of web agents, highlighting a key weakness of rule-based evaluation and the need to develop more flexible automatic evaluations. We release the benchmark at: https://agent-reward-bench.github.io", + "bbox": [ + 228, + 325, + 769, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 664, + 318, + 679 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Giving a Large Language Model (LLM) access to a web browser unlocks an entirely new capability paradigm: beyond interacting with a user through a chat interface, such models can interact with the online world to complete tasks similar to how a human would. The promise of a new paradigm has motivated the design of LLMs to control interfaces such as web browsers, starting from earlier foundation models such as ACT-1 (Adept, 2022) to the more recent OpenAI Operator (OpenAI, 2025) and Claude Computer use (Anthropic, 2024a), showing promising results in real-world tasks (Zhou et al., 2024).", + "bbox": [ + 169, + 693, + 826, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To measure the progress of web agents, a well-designed benchmark should compile a collection of realistic tasks across diverse websites. As illustrated in Figure 1, a user may ask the agent to locate a Classifieds listing for a Google Pixel phone and submit an offer via a comment. Inside a dedicated environment (e.g., a self-hosted Classifieds site), the web agent would complete the task by filling the search bar, identifying the correct listing, and writing a comment to show interest in purchasing the item. To determine if the agent successfully completed the request, we need to automatically evaluate the agent's chosen actions – known as trajectories – using a set of rules uniquely designed for the task of finding a Pixel phone on Classifieds. 
As expected, rule-based evaluation is time-consuming for", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.08942v2 [cs.LG] 6 Oct 2025", + "bbox": [ + 22, + 284, + 57, + 710 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg", + "image_caption": [ + "Figure 1: Example from AGENTREWARDBENCH, where an LLM judge evaluates a web agent's trajectory on VisualWebArena (Koh et al., 2024). The benchmark compares judgments against expert annotations to determine the effectiveness of the judge for evaluating web agents." + ], + "image_footnote": [], + "bbox": [ + 184, + 102, + 812, + 340 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "experts to design, and may not cover every successful scenario (e.g., what if the agent finds a different but valid listing?). It is also possible for an expert to annotate the trajectories, but it would be slow and expensive to scale across many web agents. This brings us to the following questions: Given a web agent trajectory, can an LLM decide if it is successful? If so, how do we determine which LLM is the most capable at evaluating web agents?", + "bbox": [ + 169, + 429, + 826, + 501 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Past works have shown that LLMs can be used as judges to evaluate the output of LLM chatbots (Zheng et al., 2023). More recently, LLM judges have been used for automatically evaluating trajectories from web agents (Pan et al., 2024; Murty et al., 2025; Trabucco et al., 2025). 
With highly accurate automatic evaluation methods, we can measure the progress of web agents on new sets of tasks, use them to synthesize trajectories for finetuning smaller models, and design reward models that can be used in a reinforcement learning (RL) setting. However, it remains unclear whether current automatic evaluators, whether rule-based or LLM-based, can predict the success of a trajectory in a way that reflects expert judgment.", + "bbox": [ + 169, + 506, + 826, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this problem, we introduce AGENTREWARDBENCH (§3), a benchmark for determining the capability of an LLM at evaluating web agents (see Figure 1). It consists of 1300 trajectories produced by 4 popular LLM agents on 5 diverse web environments, ranging from common tasks like online shopping and posting on a forum, to highly specialized requests in professional environments, such as updating task schedules on IT task management platforms. Each trajectory is labeled by expert annotators to determine whether the agent successfully completed the task, caused unintended side effects, or entered cycles of repetitive actions. Using this benchmark, we evaluate both existing and novel LLM judges (§4) alongside rule-based evaluation. We find that rule-based methods, which are used as the official automatic evaluation by environment-based benchmarks, severely underestimate the capabilities of agents and do not reflect how experts define success (§5). We further provide an in-depth analysis (§6) that highlights the weaknesses of existing LLMs when used as judges. 
Overall, we believe AGENTREWARDBENCH can be used to enable better automatic evaluation and reward modeling for web agents.", + "bbox": [ + 169, + 625, + 826, + 820 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 171, + 837, + 336, + 853 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Web Agents and Environments Designing agents that can automatically navigate user interfaces has been a long standing problem; earlier approaches employed program-based heuristics (St. Amant & Zettlemoyer, 2000), whereas later works on web navigation focus on training reinforcement learning (RL) models (Gur et al., 2018; Humphreys et al., 2022),", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "language models (Nakano et al., 2021; Gur et al., 2023; Deng et al., 2023) and multimodal models (Shaw et al., 2023; Lu et al., 2024; Zheng et al., 2024). To measure the advancements in web agents, various benchmarks have been proposed, with initial works proposing simplified environments (Shi et al., 2017; Liu et al., 2018) and subsequent iterations focusing on specific tasks like web shopping (Yao et al., 2022). More recent benchmarks focus on designing realistic environments that cover commonly used websites (Zhou et al., 2024; Koh et al., 2024) as well as specialized environments (Drouin et al., 2024; Boisvert et al., 2025).", + "bbox": [ + 169, + 103, + 823, + 203 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM Judges Zheng et al. (2023) proposed using LLMs to predict human preferences of dialogue completion for chat models. They show that a GPT-4-based judge achieves over $80\\%$ agreement with human votes on the task of selecting better completions between models pairs. 
Follow-up extends this framework to new modalities (Chen et al., 2024), metrics (Feizi et al., 2025) and coding agents (Zhuge et al., 2024); the latter, Agent-as-a-Judge, leverages intermediate feedback from the environment. He et al. (2024) extend the idea by using LLMs to judge trajectories from web agents, allowing them to determine task completion without human annotators, resulting in a high correlation with humans on a private subset of trajectories. To determine the quality of automatic judgments, Pan et al. (2024) evaluate four LLM judges using trajectories from a GPT-4 agent on WebArena tasks, and find that the best judge achieves $80.6\\%$ accuracy against the rule-based evaluator from WebArena. Unlike prior works on LLM judges, we design AGENTREWARDBENCH with trajectories from several LLM agents on diverse web benchmarks, where each one is annotated by human experts on multiple dimensions. By following a human-focused approach similar to Lambert et al. (2024), we ensure that LLM judges are evaluated against expert preferences on a wide range of scenarios.", + "bbox": [ + 173, + 209, + 826, + 434 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Trajectory Synthesis Leveraging web environments that can be created and reset without real-world impact, recent works started to explore generating trajectories without human supervision. Leveraging LLM judges and LLM-generated tasks, trajectory synthesis can be used to bootstrap agent-judge training loops (Murty et al., 2024; 2025), to create contrastive pairs (Putta et al., 2024) for direct preference optimization (Rafailov et al., 2023), or as training data to finetune a base model (Lai et al., 2024; Patel et al., 2024; Trabucco et al., 2025). Although all the methods leverage an LLM judge, they lack a clear way of directly determining the quality of judgments, instead relying on the downstream performance improvement to validate their approach. 
To this end, AGENTREWARDBENCH enables researchers to choose the most appropriate LLM judge for a category of web tasks based on their effectiveness at evaluating web agents.", + "bbox": [ + 169, + 441, + 826, + 597 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 AGENTREWARDBENCH", + "text_level": 1, + "bbox": [ + 171, + 614, + 416, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we introduce AGENTREWARDBENCH, a benchmark designed to assess the capabilities of LLM judges for evaluating web agents (§3.1). We curate 5 diverse web environments and tasks (§3.2) in order to collect trajectories from web agents based on 4 LLMs (§3.3). For each trajectory, a team of expert annotators carefully reviews the screenshots, actions, and the agent's reasoning chains before labeling them as either successful or unsuccessful, alongside other auxiliary labels (see Figure 2). Finally, we evaluate LLM judges (Table 1) by comparing their predictions with expert annotations to determine their effectiveness for automatic evaluation.", + "bbox": [ + 169, + 643, + 826, + 755 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Assessment Framework", + "text_level": 1, + "bbox": [ + 171, + 768, + 390, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Trajectory Definition Let $o_i$ be an observation of a browser at time step $i$ , $a_i$ be an action that can be executed on a webpage through a browser navigation engine $B$ such that $o_{i+1} = B(o_i, a_i)$ , and $r_i$ be the reasoning for choosing the action. We define a web agent trajectory as the sequence $\\mathcal{T} = \\{o_1, (r_1, a_1), o_2, (r_2, a_2), \\ldots, o_{n-1}, (r_{n-1}, a_{n-1}), o_n\\}$ where $o_n$ is the final observation in the trajectory. Each observation contains a screenshot of the browser $s_i$ , the Document Object Model (DOM) tree representation of the browser, and an accessibility (A11Y) tree rendered from the DOM tree. 
For the observation to be useful for an LLM agent, we need a representation function $R$ that produces $p_i = R(o_1, r_1, a_1, \\ldots, o_i)$ , which can be used as an input for an LLM. If the agent is multimodal, $o_i$ would include", + "bbox": [ + 169, + 794, + 826, + 921 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg", + "image_caption": [ + "Figure 2: AGENTREWARDBENCH creation process. We first collect trajectories from LLM agents inside web environments using instructions from several benchmarks. Then, the trajectories are reviewed by expert annotators, who indicate if the trajectory is successful, led to side effects, and contains repetition cycles. Finally, we use the annotated trajectories to evaluate LLM judges." + ], + "image_footnote": [], + "bbox": [ + 184, + 101, + 812, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "screenshots; otherwise, it would be a textual representation of the page (e.g., accessibility tree). Then, $p_i$ is given to a language model to produce a completion $c_i = \\mathrm{LM}(p_i)$ , or $c_i = \\mathrm{VLM}(p_i, s_i)$ in the case of a multimodal LLM. The completion is parsed by an execution function $E$ to produce $(a_i, r_i) = E(c_i)$ .", + "bbox": [ + 169, + 340, + 823, + 398 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Annotation Design For each trajectory, an expert annotator reviews a goal $g$ and sequence $\\{s_1, (r_1, a_1), \\ldots, s_{n-1}, (r_{n-1}, a_{n-1}), s_n\\}$ in order to answer questions $\\mathcal{Q} = \\{q_1, \\ldots, q_m\\}$ . We consider the answers produced, $\\mathcal{A}^* = \\{a_1^*, \\ldots, a_m^*\\}$ , as the ground truth annotations for the trajectory, which indicate whether the agent successfully completed $g$ . 
To collect $\\mathcal{A}^*$ , we use the following $\\mathcal{Q}$ in the annotation guidelines:", + "bbox": [ + 169, + 406, + 823, + 477 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Success: Was the sequence of actions successful in achieving the goal?", + "2. Side Effect: Did the agent perform unnecessary actions that could lead to unintended side effects?", + "3. Repetition Cycle: Did the agent loop through a sequence of actions that did not make progress towards the goal?" + ], + "bbox": [ + 196, + 494, + 823, + 603 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Agreement with respect to success is the primary criterion with which we evaluate LLM judges. The remaining can be useful as auxiliary criteria for detecting issues ahead of time. For example, if an agent purchases several irrelevant products when the user only requested one, then the trajectory would be flagged for side effects, independent of task success. A judge can also indicate the presence of a cycle, for example, if the agent repeatedly clicks on a disabled button. Both signals can be used to penalize the agent during training or steer it to another action at inference.", + "bbox": [ + 169, + 619, + 823, + 717 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Annotation Setup The team of annotators consisted of 6 experts with a deep understanding of the tasks and environments through their research on web agents. They used a custom-built user interface that displays each trajectory with screenshots, actions, and reasoning. Rating disagreements were resolved by annotators discussing among themselves until clear annotations can be produced for ambiguous trajectories. 
Moreover, the annotators also have access to the environment and accessibility trees when screenshots are insufficient.", + "bbox": [ + 169, + 727, + 823, + 811 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Judge Model Given a goal $g$ , trajectory $\\mathcal{T}$ and questions $\\mathcal{Q}$ , a judge model returns a judgment $\\hat{\\mathcal{A}}$ , which is an estimate of $\\mathcal{A}^*$ . We can use $\\hat{\\mathcal{A}}$ to derive a reward in RL or to automatically evaluate web agents when $\\mathcal{A}^*$ is unavailable. To implement the judge, we need a judge-specific function $R_j$ that produces a representation of the trajectory, $p = R_j(o_1, r_1, a_1, \\ldots, o_n)$ . $R_j =$ can vary substantially, ranging from a simple list of actions $a_1, \\ldots, a_{n-1}$ , to using another LLM to process the observation history. We describe judges used in previous works and introduce a simplified judge in Section 4 and provide supplementary details in Section A.3.", + "bbox": [ + 169, + 819, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Tasks and Environments", + "text_level": 1, + "bbox": [ + 171, + 103, + 395, + 116 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We select 5 benchmarks designed to evaluate web agents inside dedicated environments and real websites, including general-purpose (Zhou et al., 2024), vision-focused (Koh et al., 2024), real-world (Yoran et al., 2024), and enterprise-oriented (Drouin et al., 2024; Boisvert et al., 2025) tasks. In total, we curate 351 unique tasks across 8 environments and 66 websites, which we separate into 51 development and 300 test tasks (details in Section A.1).", + "bbox": [ + 169, + 128, + 826, + 199 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "WebArena (WA; Zhou et al. 
2024) This benchmark comprises 6 self-hosted websites covering a wide range of domains: customer relationship management, map navigation, online encyclopedia, shopping site, social forum, and software development collaboration platform. Each environment is derived from real open-source projects that develop self-hosted environments for both commercial and personal usage. Each task consists of a textual goal that requires a good understanding of one or multiple environments to complete.", + "bbox": [ + 169, + 207, + 826, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VisualWebArena (VWA; Koh et al. 2024) To complement WebArena's text-based goals, we also include VisualWebArena (VWA), a benchmark focusing on tasks that require visual reasoning to complete. For instance, a user may include an image alongside the goal, or the task could be designed to only be solved if the agent selects an item with a unique visual characteristic (e.g., purchasing a TV with the widest bezel). VWA also introduces a new online marketplace environment (Classifieds).", + "bbox": [ + 169, + 301, + 825, + 387 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "AssistantBench (AB; Yoran et al. 2024) In addition to the self-hosted environments, we consider trajectories resulting from agent execution on real-world websites. This benchmark defines tasks that require navigating the internet, starting from a search engine. Since the test set is private, we use the validation set, which consists of 33 unique tasks.", + "bbox": [ + 169, + 393, + 823, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "WorkArena (Work; Drouin et al. 2024) and WorkArena++ (Wk++; Boisvert et al. 
2025)", + "text_level": 1, + "bbox": [ + 169, + 459, + 825, + 473 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To increase the diversity of tasks relevant to professional environments, we incorporate WorkArena (Boisvert et al., 2025), a benchmark of 18 basic tasks on ServiceNow, a software-as-a-service platform for professional workflows in the information technology (IT), human resources, and customer management domains. WorkArena++ introduces tasks with greater complexity, requiring planning and reasoning to correctly complete multiple sub-tasks. Including this alongside WorkArena allows us to evaluate judges on a wider range of task difficulty. We focus on the Level 2 tasks since Level 3 is too challenging for current agents.", + "bbox": [ + 169, + 473, + 825, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Web Agents Design", + "text_level": 1, + "bbox": [ + 171, + 587, + 361, + 603 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To collect trajectories on the 5 benchmarks, we design web agents using two models from major commercial providers and two open-weight LLMs.", + "bbox": [ + 169, + 612, + 823, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LLM backbones On the commercial side, we use OpenAI's $GPT-4o^2$ (Hurst et al., 2024) and Anthropic's Claude 3.7 Sonnet (Anthropic, 2024b). They are the flagship models of their respective providers, both of which offer computer-use agents powered by their LLMs, namely OpenAI Operator (OpenAI, 2025) and Anthropic Claude's Computer use (Anthropic, 2024a). We select two leading open-weights LLMs to complement the commercial LLMs: Llama-3.3-70B (Grattafori et al., 2024) and Qwen2.5-VL (Bai et al., 2025). In both cases, we choose the instruction-tuned variant, which have undergone post-training for tool-use or UI navigation. 
Moreover, since Llama-3.3 is a text-only model, it was excluded from VisualWebArena, which requires image-based reasoning.", + "bbox": [ + 169, + 652, + 826, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent Platform By default, each LLM backbone receives an input processed by a representation function $R$ and generates a completion $c_{i}$ . Then, $c_{i}$ is interpreted as an action by an execution function $E$ . To implement $E$ , we use AgentLab and BrowserGym (Chezelles et al., 2025), an ecosystem for designing web agents using LLMs (details in Section A.1).", + "bbox": [ + 169, + 787, + 826, + 844 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Trajectory Annotations and Splits We collect a total of 1302 trajectories from our 4 LLM-based web agents across five benchmarks. Based on the task split (§3.2), 196 trajectories are", + "bbox": [ + 169, + 852, + 826, + 882 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1https://developer.servicenow.com", + "bbox": [ + 189, + 895, + 439, + 909 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2We use the version gpt-4o-2024-11-20", + "bbox": [ + 192, + 910, + 452, + 922 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/307853ee995fe2de7274f47458079ca5894b5e3b2a5641a08611b24b94c6ed5a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryJudgeOverallABVWAWA PrecisionWorkWk++
PrecisionRecallF1
OfficialRule-based*83.855.967.125.085.279.0100.083.3
ExistingAER-C67.771.969.783.356.068.8100.066.7
AER-V67.671.569.583.361.267.696.459.3
NNetNav52.582.464.120.854.554.377.343.2
Ours (A)Claude 3.7 S.68.881.674.787.561.069.385.066.7
GPT-4o69.883.175.977.863.070.294.663.0
GPT-4o Mini61.586.171.780.057.963.584.249.4
Llama 3.367.779.072.975.059.668.294.362.7
Qwen2.5-VL64.389.875.072.759.363.687.260.3
Ours (S)Claude 3.7 S.69.476.372.771.464.869.385.366.7
GPT-4o68.180.373.777.860.769.993.859.6
GPT-4o Mini64.578.370.880.057.466.990.354.8
Qwen2.5-VL64.586.173.770.058.562.993.864.4
", + "bbox": [ + 179, + 101, + 821, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Judge performance for predicting success, measured with precision (§4.2). We report recall and F1 as auxiliary scores. We examine two variants of the simplified judge: one with the final accessibility tree (A), and the other with the final screenshot (S). *Rule-based evaluation are included for reference.", + "bbox": [ + 169, + 335, + 823, + 377 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "in the development split and 1106 are in the test split (details in A.2). The annotators follow the process described in Section 3.1 to label all trajectories, producing a total of 3906 binary annotations. To assess agreement between annotators, we annotated the GPT-4o agent's trajectory on WebArena with a second annotator. We obtained an inter-annotator agreement of $89.3\\%$ on success, indicating a high level of consistency among annotators.", + "bbox": [ + 169, + 415, + 826, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 LLM judges for web tasks", + "text_level": 1, + "bbox": [ + 169, + 503, + 439, + 523 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Judge implementations", + "text_level": 1, + "bbox": [ + 169, + 535, + 390, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We consider two existing implementations of LLM judges for web agents, Agent Eval Refine (AER; Pan et al. 2024) and NNetNav (Murty et al., 2025), and introduce a simplified judge that simultaneously predicts success, side effects, and repetition. In Agent-as-a-Judge (Zhuge et al., 2024), the method assumes the judge can interact with the environment after the agent finishes executing actions, which isn't feasible when the environment state cannot be preserved or shareable across agents. 
Other LLM judge variants were proposed (He et al., 2024; Putta et al., 2024; Lai et al., 2024; Trabucco et al., 2025), but our three judge implementations cover major strategies for representing trajectories.", + "bbox": [ + 169, + 560, + 826, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "AER (Pan et al., 2024) The judge in this framework takes as input the sequence of agent thoughts and actions alongside the final browser state, which is either passed to a vision-enabled model as a screenshot (AER-V) or as a caption generated by a captioner model (AER-C). Then, the judge outputs its reasoning before predicting success or failure. For both the judge and captioner, we implement this method using GPT-4o, which is an overall stronger model than the GPT-4 (Achiam et al., 2023) model originally used.", + "bbox": [ + 169, + 681, + 826, + 767 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "NNetNav (Murty et al., 2025) In this work, a Llama 3.1 70B judge receives a summary of changes across all observations and has to give a rating between 1 (worst) and 5 (best) after providing the thought process; the rating is binarized by thresholding at 4, based on the original implementation. To generate summaries, an LLM is used to describe the change between two observations based on the accessibility trees instead of screenshots. We use Llama 3.3 70B (Al-Dahle, 2024), an improved version of the original backbone.", + "bbox": [ + 169, + 773, + 826, + 859 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Simplified judge (ours) We propose a simplified design for our judge. First, it directly answers the three questions asked to the annotators. This allows it to return multiple labels within a single completion. 
Then, we decouple the system prompt and reasoning chain from the final state representation, allowing the judge to receive either the accessibility tree or the", + "bbox": [ + 169, + 867, + 826, + 926 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/785c9e954ce36bf5a7ab213d6ff6ad7916337d2e98fbdbfea2cb611e1577c5dd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
A11YScreenSuccessSide EffectRepetition
PRF1PRF1PRF1
62.181.770.66.531.910.892.516.828.4
X61.586.171.77.270.813.078.646.458.3
X64.578.370.86.631.911.092.318.530.8
XX60.773.966.77.276.413.278.159.167.3
", + "bbox": [ + 205, + 101, + 789, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Ablation study of our GPT-4o mini judge, measured in precision (P), recall (R), and F1. We consider how including accessibility trees and screenshots in the input affects the predictions.", + "bbox": [ + 169, + 202, + 823, + 231 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "screenshot. This differs from AER, which requires a vision-enabled model, and NNetNav, which requires a long-context model capable of receiving multiple accessibility trees. Our method is compatible with both multimodal and text-only LLMs and does not require a separate LLM to caption the screenshot or summarize changes across observations.", + "bbox": [ + 169, + 258, + 823, + 316 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Evaluation", + "text_level": 1, + "bbox": [ + 171, + 329, + 294, + 343 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To evaluate LLM judges, we use the precision score, which is the ratio of true positives over all predicted positives (true + false positives). The metric is a good fit for rejection finetuning (RFT), where we are interested in increasing the number of true positives (actual successful trajectories) while reducing the number of false positives (failed trajectories added to the dataset due to poor LLM judgments). For reward modeling, we also want to prioritize true positives since they are the primary signals for many RL algorithms, while false positives should be minimized to avoid introducing noise to the loss function. 
Moreover, recall and F1 benefit from minimizing false negatives, which is useful for improving sample efficiency by reducing the number of valid trajectories removed; we report them as auxiliary metrics.", + "bbox": [ + 169, + 354, + 826, + 481 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Judge Performance", + "text_level": 1, + "bbox": [ + 171, + 494, + 354, + 508 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Table 1, we provide an overview of the performance of judges across benchmarks using the metrics defined in Section 4.2. We find that GPT-4o and Claude 3.7 Sonnet-based simplified judges achieve higher precision compared to prior approaches, indicating that removing the internal LLMs for captioning or summarizing changes does not hinder their capabilities. Notably, no judge consistently stands out across benchmarks, highlighting the importance of selecting an appropriate LLM backbone based on the nature of the task.", + "bbox": [ + 169, + 518, + 823, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Low precision limits existing judges We notice that no judge achieves above $70\\%$ precision, which means that $30\\%$ of trajectories are erroneously marked as successful. This severely limits the usefulness of the judges for downstream applications, such as using the filtered trajectories for finetuning an agent, as the agent will learn to generate incorrect trajectories for a substantial portion of the tasks. This indicates LLM judges are currently not a reliable way of assessing the true capabilities of agents. 
Consequently, judges will need to achieve higher precision before they are useful for automatic evaluation, which also affects their downstream utility for methods like RFT and RL.", + "bbox": [ + 169, + 612, + 826, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Official rule-based evaluation underestimates success Similar to LLM judges, the rule-based evaluation used by benchmarks can be compared with expert annotations. Since they use task-specific configurations to determine success, they may reject successful trajectories due to inconsequential differences. For instance, in WebArena, if a user asks \"What's the closest national park to the largest city in Maine?\", the agent may reply: \"The closest national park to Portland [...] is Acadia National Park\". Rule-based evaluation considers it unsuccessful since the configuration requires it to exactly match \"Acadia National Park\". As a result, the rule-based approach achieves a recall of $55.9\\%$ , indicating a higher rate of false negatives compared to LLM judges. Overall, a substantial precision gap exists between rule-based methods and LLM judges, but rule-based methods severely underestimate the true performance of web agents, highlighting the need for more flexible automatic evaluation.", + "bbox": [ + 169, + 732, + 826, + 886 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of Input Representation Browser screenshots represent an intuitive state for humans, but LLMs may need more than vision alone, as screenshots miss page structure", + "bbox": [ + 169, + 895, + 826, + 925 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5632a9bac292c4bee36a707181d78d6ad711c2e7f7fcd6e2c8dde1e3c012afc3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
AgentHumanGPT-4o JudgeRule-based
VWAWAWk++VWAWAWk++VWAWAWk++
Claude 3.7 S.28.355.118.434.864.120.723.930.88.1
GPT-4o35.942.318.447.850.011.517.425.64.6
Llama 3.30.022.49.20.027.65.80.018.43.5
Qwen2.5-VL21.733.313.834.852.614.917.429.511.5
", + "bbox": [ + 181, + 101, + 815, + 193 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Success Rate of web agents measured by expert annotators, GPT-4o Judge (with accessibility tree) and rule-based evaluation on various benchmarks (§3.2). Results by environment are in Table 6.", + "bbox": [ + 171, + 203, + 823, + 231 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and hidden attributes found in accessibility trees. To investigate the impact of different representations, we ablate our GPT-4o-mini simplified judge in Table 2. We observe that only including screenshots achieves a high precision for success and repetition, whereas only including accessibility trees allows higher recall. Surprisingly, including both accessibility trees and screenshots yields a lower performance than including only the screenshot, indicating that more information distracts rather than assists the judge.", + "bbox": [ + 169, + 267, + 826, + 353 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Revisiting how we evaluate task success rate", + "text_level": 1, + "bbox": [ + 171, + 369, + 602, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "One of the core applications of LLM judges is to estimate the success rate on a web navigation benchmark, which is useful in scenarios where there are no dedicated functions to calculate the rule-based success rate, which is the standard evaluation for many web agent benchmarks. However, rule-based approaches may not always agree with experts. 
In Table 3, we compare the success rate calculated from expert annotations, rule-based evaluation, and a GPT-4o judge with accessibility trees.", + "bbox": [ + 169, + 400, + 826, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rule-based evaluation does not reflect expert-defined success rates We notice a stark difference between the judge and rule-based approach: whereas the LLM judge tends to overestimate the success rate of every agent (with two exceptions in WorkArena++) rule-based methods consistently underestimate it. Moreover, the underestimation varies substantially, with the performance of GPT-4o being $16.7\\%$ lower on WebArena and $18.5\\%$ lower on VWA compared to expert annotations. This highlights a major discrepancy between the official task success rate reported by rule-based methods and the success rate according to expert annotators. For instance, rule-based evaluation ranks Qwen2.5-VL above GPT-4o on WebArena and WorkArena++ (and equally on VWA), whereas expert annotators prefer GPT-4o over Qwen2.5-VL on all benchmarks, with over $14\\%$ higher success rate on VWA. Overall, this stresses the need to develop new methods to calculate task success rate that more precisely reflect expert judgments.", + "bbox": [ + 169, + 493, + 826, + 661 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Error Analysis", + "text_level": 1, + "bbox": [ + 171, + 679, + 334, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we qualitatively examine failure cases of LLM judges. Following a GPT-4o judge, we focus on the common error categories to understand how LLM judges fail.", + "bbox": [ + 169, + 708, + 823, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Grounding mismatch When an agent misunderstands what is happening on the screen, its thought process may not reflect the reality of the webpage. 
In such cases, a judge without access to the screenshots may produce an erroneous judgment due to the agent misunderstanding what is happening on the screen. For example, when a user requests to show \"the product page for the item in the second row, second column,\" the Qwen2.5-VL agent ends up selecting the second item in the first row. However, it writes in its reasoning chain that \"Based on the layout of the page, the second row, second column item is the [energy Drink].\" The judge accepts the agent's thought process without realizing it opened the wrong page: \"The agent's goal was to navigate to the product page for the item in the second row, second column. The agent successfully reached the correct product page.\"", + "bbox": [ + 169, + 746, + 826, + 887 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Misleading agent reasoning The agent may have misleading elements in its reasoning, leading the judge to reason that the agent completed the task correctly. In a multi-step", + "bbox": [ + 169, + 895, + 826, + 926 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg", + "image_caption": [ + "Figure 3: Example of judge error (discussed in Section 6). In this example, the user requests the agent to buy cereal in VisualWebArena, but the agent stops after adding it to the cart. The judge erroneously identified the trajectory as successful, even though the agent missed an important detail in the instruction." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 101, + 511, + 303 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 103, + 816, + 303 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "WorkArena++ task, the user requested the agent to apply a search filter to include a unique ID. After several unsuccessful attempts, the agent ended up stating it succeeded in its reasoning chain, even though no filter was applied. The judge was misled by the agent and wrote in its own reasoning chain that \"The agent successfully [...] applied the filter to extract entries...\" before incorrectly indicating that the trajectory was successful.", + "bbox": [ + 169, + 402, + 823, + 474 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Missed instruction details In some cases, the agent does not complete the task completely, missing crucial details from the instruction (see Figure 3). For example, when the user requests to \"buy the cheapest cereal with a graphic character on the box in the Cereals category,\" the agent finds the correct product and informs the customer: \"I've found the cheapest cereal with a graphic character on the box. It's Cocoa Puffs, 10.4 Oz Box...\" However, it missed a crucial detail: the user requested that they buy the product. Unfortunately, the judge mistakenly believes that the agent completed a purchase: \"The agent successfully identified and purchased the cheapest cereal with a graphic character on the box...\"", + "bbox": [ + 169, + 481, + 826, + 595 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Misunderstanding action intents We found that the agent may misuse certain actions, leading the trajectory to fail very close to completion. In such cases, the LLM judge may incorrectly decide that the trajectory is successful and ignore the misused action. 
In one instance where the goal was to answer \"What is the minimum travel time by car from Animal Rescue League of Pittsburgh to Schenley park?\", the Qwen2.5-VL agent completes all required actions, but ends up reporting the task as unfeasible instead of replying to the user. The GPT-4o judge (with screenshot) correctly reasons that the travel time was shown on the screen, but does not point out that reporting the task as unfeasible is incorrect, instead asserting that \"all actions were confined to the task of finding the travel time.\"", + "bbox": [ + 169, + 602, + 823, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Overall, current LLM judges are limited by their capability to detect nuanced issues within trajectories, as shown by the judge missing details and misunderstanding an action. Moreover, they will easily agree with the agent's reasoning even when it is wrong, which has been previously observed in LLMs (Sharma et al., 2023). Future research should aim to address these issues to improve the performance of LLM judges for evaluating web agents.", + "bbox": [ + 169, + 734, + 826, + 806 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 824, + 308, + 839 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We introduce AGENTREWARDBENCH, a benchmark for evaluating LLM judges for web agent trajectories. The benchmark consists of over 1300 trajectories, each annotated by experts across three dimensions: whether the agent succeeded, whether it caused unintended side effects, and whether it repeated unnecessary actions. 
We evaluate 12 LLM judges on AGENTREWARDBENCH and find that simpler input representation can achieve", + "bbox": [ + 169, + 853, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 503, + 958 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "higher agreement with expert annotators compared to prior approaches. Moreover, we find that rule-based evaluation, often used by environment-based benchmarks, does not achieve a lower-than-expected agreement with experts. Instead, it tends to reject many valid trajectories, which results in the success rate of certain web agents being lower than what an expert would perceive. Overall, we believe our benchmark will help researchers design better LLM judges for web agents trajectories, which will enable the design of automatic evaluators and reward models that better reflect expert judgments.", + "bbox": [ + 169, + 103, + 826, + 203 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 171, + 220, + 344, + 238 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Xing Han Lu acknowledges the support of the Natural Sciences and Engineering Research Council of Canada (NSERC) [funding reference no. 579403]. The project is supported by the Google-Mila grant. We thank Alexandre Lacoste, Shikhar Murty, and the McGill NLP group members for helpful discussions.", + "bbox": [ + 169, + 250, + 826, + 306 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 324, + 274, + 340 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. URL https://arxiv.org/abs/2303.08774.", + "Adept. 
Act-1: Transformer for actions, 2022. URL https://www.adept.ai/blog/act-1/.", + "Ahmad Al-Dahle. The future of ai: Built with llama, December 2024. URL https://ai.meta.com/blog/future-of-ai-built-with-llama/.", + "Anthropic. Introducing computer use, a new claude 3.5 sonnet, and claude 3.5 haiku, 2024a. URL https://www.anthropic.com/news/3-5-models-and-computer-use.", + "Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024b. URL https://api.semanticscholar.org/CorpusID:268232499.", + "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report, 2025. URL https://arxiv.org/abs/2502.13923.", + "Léo Boisvert, Megh Thakkar, Maxime Gasse, Massimo Caccia, Thibault Le Sellier De Chezelles, Quentin Cappart, Nicolas Chapados, Alexandre Lacoste, and Alexandre Drouin. Workarena++: Towards compositional planning and reasoning-based common knowledge work tasks, 2025. URL https://arxiv.org/abs/2407.05291.", + "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym, 2016. URL https://arxiv.org/abs/1606.01540.", + "Dongping Chen, Ruoxi Chen, Shilin Zhang, Yinuo Liu, Yaochen Wang, Huichi Zhou, Qihui Zhang, Yao Wan, Pan Zhou, and Lichao Sun. Mllm-as-a-judge: Assessing multimodal llm-as-a-judge with vision-language benchmark, 2024. URL https://arxiv.org/abs/2402.04788.", + "Thibault Le Sellier De Chezelles, Maxime Gasse, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, Sahar Omidi Shayegan, Lawrence Keunho Jang, Xing Han Lu, Ori Yoran, Dehan Kong, Frank F. Xu, Siva Reddy, Quentin Cappart, Graham Neubig, Ruslan Salakhutdinov, Nicolas Chapados, and Alexandre Lacoste. 
The browsergym ecosystem for web agent research, 2025. URL https://arxiv.org/abs/2412.05467.", + "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36:28091-28114, 2023." + ], + "bbox": [ + 171, + 354, + 826, + 924 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H. Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, Nicolas Chapados, and Alexandre Lacoste. Workarena: How capable are web agents at solving common knowledge work tasks?, 2024. URL https://arxiv.org/abs/2403.07718.", + "Aarash Feizi, Sai Rajeswar, Adriana Romero-Soriano, Reihaneh Rabbany, Valentina Zantedeschi, Spandana Gella, and João Monteiro. Pairbench: Are vision-language models reliable at comparing what they see?, 2025. URL https://arxiv.org/abs/2502.15210.", + "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint, 2024. URL https://arxiv.org/abs/2407.21783.", + "Izzeddin Gur, Ulrich Rueckert, Aleksandra Faust, and Dilek Hakkani-Tur. Learning to navigate the web. arXiv preprint arXiv:1812.09195, 2018.", + "Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856, 2023.", + "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. ArXiv, abs/2401.13919, 2024. 
URL https://api.semanticscholar.org/CorpusID:267211622.", + "Peter C Humphreys, David Raposo, Toby Pohlen, Gregory Thornton, Rachita Chhaparia, Alistair Muldal, Josh Abramson, Petko Georgiev, Alex Goldin, Adam Santoro, and Timothy Lillicrap. A data-driven approach for learning to control computers, 2022. URL https://arxiv.org/abs/2202.08137.", + "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.", + "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. VisualWebArena: Evaluating Multimodal Agents on Realistic Visual Web Tasks, June 2024. URL http://arxiv.org/abs/2401.13649. arXiv:2401.13649 [cs].", + "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, and Jie Tang. Autowebglm: A large language model-based web navigating agent, 2024. URL https://arxiv.org/abs/2404.03648.", + "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, Noah A. Smith, and Hannaneh Hajishirzi. Rewardbench: Evaluating reward models for language modeling, 2024. URL https://arxiv.org/abs/2403.13787.", + "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, Tianlin Shi, and Percy Liang. Reinforcement Learning on Web Interfaces using Workflow-guided Exploration. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings, 2018. URL https://openreview.net/forum?id=ryTp3f-0-.", + "Xing Han Lu, Zdenek Kasner, and Siva Reddy. Weblinx: Real-world website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930, 2024.", + "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. 
BAGEL: Bootstrapping Agents by Guiding Exploration with Language, June 2024. URL http://arxiv.org/abs/2403.08140.arXiv:2403.08140 [cs]." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shikhar Murty, Hao Zhu, Dzmitry Bahdanau, and Christopher D. Manning. Nnetnav: Unsupervised learning of browser agents through environment interaction in the wild, 2025. URL https://arxiv.org/abs/2410.02907.", + "Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332, 2021.", + "OpenAI. Introducing operator, January 2025. URL https://openai.com/index/introducing-operator.", + "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents, 2024. URL https://arxiv.org/abs/2404.06474.", + "Ajay Patel, Markus Hofmacher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks, 2024. URL https://arxiv.org/abs/2405.20309.", + "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents, 2024. URL https://arxiv.org/abs/2408.07199.", + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023.", + "Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. 
Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. Towards understanding sycophancy in language models, 2023. URL https://arxiv.org/abs/2310.13548.", + "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=3PjCt4kmRx.", + "Tianlin Shi, Andrej Karpathy, Linxi Fan, Jonathan Hernandez, and Percy Liang. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, pp. 3135-3144. PMLR, 2017.", + "Robert St. Amant and Luke S. Zettlemoyer. The user interface as an agent environment. In Proceedings of the Fourth International Conference on Autonomous Agents, AGENTS '00, pp. 483-490, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581132301. doi: 10.1145/336595.337575. URL https://doi.org/10.1145/336595.337575.", + "Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, and Ruslan Salakhutdinov. Towards internet-scale training for agents, 2025. URL https://arxiv.org/abs/2502.06776.", + "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. WebShop: Towards Scalable Real-world Web Interaction with Grounded Language Agents. In NeurIPS, 2022. URL https://arxiv.org/abs/2207.01206.", + "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks?, 2024. URL https://arxiv.org/abs/2407.15711.", + "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(ision) is a generalist web agent, if grounded, 2024. URL https://arxiv.org/abs/2401.01614." 
+ ], + "bbox": [ + 173, + 102, + 828, + 924 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena, 2023. URL https://arxiv.org/abs/2306.05685.", + "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. WebArena: A Realistic Web Environment for Building Autonomous Agents, April 2024. URL http://arxiv.org/abs/2307.13854. arXiv:2307.13854 [cs].", + "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, Yangyang Shi, Vikas Chandra, and Jürgen Schmidhuber. Agent-as-a-judge: Evaluate agents with agents, 2024. URL https://arxiv.org/abs/2410.10934." + ], + "bbox": [ + 173, + 102, + 825, + 291 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Benchmark", + "text_level": 1, + "bbox": [ + 171, + 102, + 313, + 118 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Environment and Experiments Details", + "text_level": 1, + "bbox": [ + 171, + 132, + 503, + 148 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "AssistantBench Although an unlimited number of websites can be visited, we observed that the agents visited a total of 66 unique domains between 1 and 129 times across all trajectories we collected. The number of times a domain was visited can be found in Table 4. 
Additionally, we replace the default search engine with an alternative search engine (https://duckduckgo.com) as the original homepage blocks browser automation, which renders the tasks unachievable.", + "bbox": [ + 169, + 157, + 823, + 242 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Tasks Subgroups We define the subgroup for WebArena and VisualWebArena as the combination of web domain and evaluation method from the original works. The evaluation methods consist of string matching, HTML-based programs, webpage image querying, and final URL matching. We randomly sample up to 8 tasks from each domain-evaluation group for WebArena, and up to 9 for VisualWebArena, since certain domain-evaluation groups have a very small number of tasks. For WorkArena, we attempt to evenly distribute the task categories. As a result, we have the following task distributions:", + "bbox": [ + 169, + 250, + 826, + 349 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- WebArena: Wikipedia (8), Map (18), Reddit (18), Shopping Admin (18), Shopping (19), Gitlab (19)", + "- VisualWebArena: Wikipedia (17), Reddit (27), Classifieds (28), Shopping (28)", + "- WorkArena: Sophisticated memory (15), Information retrieval (20), Contextual understanding infeasible tasks (21), Planning and problem solving (22), Data driven decision making and reasoning (22)" + ], + "bbox": [ + 212, + 359, + 823, + 454 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Agent Hyperparameters The binary flags used in AgentLab (Chezelles et al., 2025) are shown in Table 5. 
We set a maximum limit of 40K input tokens and 8192 output tokens.", + "bbox": [ + 169, + 465, + 823, + 494 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Agent Platform Implementation In addition to abstracting websites and browser engines into Gym-compatible environments (Brockman et al., 2016), BrowserGym (Drouin et al., 2024; Chezelles et al., 2025) offers advanced preprocessing of complex web inputs (i.e., DOM and accessibility trees) and can automatically parse LLM output and execute them as browser actions like clicks, form inputs, tab actions, etc. Additionally, the BrowserGym ecosystem includes AgentLab, a framework for processing input representation and managing web agent experiments. We use AgentLab to design our representation function $R$ , ensuring unified hyperparameters and inputs. As a result, we can avoid unintended differences that may arise from customizing prompts and representations for each LLM.", + "bbox": [ + 169, + 503, + 826, + 628 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Annotations", + "text_level": 1, + "bbox": [ + 171, + 643, + 310, + 657 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Trajectory filtering In total, 351 tasks were considered across 5 benchmarks (33 in AssistantBench, 100 in VisualWebArena, 100 in WebArena, 18 in WorkArena, and 100 in WorkArena++) We collect trajectories from agents built from each of three multimodal models: Claude 3.7 Sonnet, GPT-4o, Qwen2.5-VL. Moreover, since Llama 3.3 is not multimodal, we only collect trajectories on 251 tasks (excluding VisualWebArena). Additionally, Llama 3.3 did not complete two WebArena tasks (nos. 735 and 805) due to timeout issues that consistently occurred in the environment, despite multiple attempts to restart. 
Thus, we obtain a total of 1302 trajectories, where 196 are stored in the development split and 1106 in the test split.", + "bbox": [ + 169, + 667, + 826, + 796 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Interface To annotate the trajectories, we designed a fully customized annotation interface using Gradio (see Figure 4). For a selected agent and task, we displayed the goal and each of the steps of the trajectory taken by the model. It shows the model's reasoning and action, as well as a screenshot with the action element on overlay. Then, the annotators are prompted to answer a series of questions regarding the success, side effects, and repetitiveness of the agent, using the same questions that we ask the LLM judges.", + "bbox": [ + 169, + 801, + 823, + 887 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Shared Knowledge Given that the annotators are experts, it is possible that the annotators share knowledge of web agents that non-expert may not possess; we identify several shared", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "knowledge facts. (1) web agent design and capabilities: the annotators are aware that the agents are designed with LLMs, some of which have multimodal capabilities, and that they are capable of generating reasoning traces to support actions, and that the LLMs may be subject to hallucination or may product repetitive sequences of text. (2) dedicated web environments: the annotators know that several the websites used in the project come from prior publications in the domain, including WebArena (Zhou et al., 2024), VisualWebArena (Koh et al., 2024), WorkArena (Drouin et al., 2024; Boisvert et al., 2025). They are aware that some of the websites are designed specific for the task, whereas others come from real-world websites. 
(3) Automatic Evaluation: the annotators know that the web environments employ automatic evaluation methods, such as string matching and URL matching, to evaluate the agents. Thus, a task that is successful or unsuccessful may terminate earlier, but the agent will not be guaranteed to receive a positive reward for that task.", + "bbox": [ + 169, + 103, + 826, + 272 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Annotator agreements and disagreements resolution For most tasks, binary annotations can be produced. However, in some cases, the annotator may not be certain of their annotation, and are allowed to mark a trajectory as uncertain, which was subsequently reviewed by the other annotators. In some cases, annotators may disagree with their judgments. In general, a salient reason for mismatch is the ambiguity of the instructions. For example, a task instruction might mention \"buy a black t-shirt\", but may not specify if it is fully black or can have other graphics. In such cases, annotators are advised to go for the most lenient option. More generally, to ensure that annotators can easily voice their uncertainty and disagreement, the first half of the annotation was conducted in person with all annotators present concurrently. Thus, when an annotator was uncertain about the annotation for a trajectory, they can ask other annotators, who can deliberate about the correct annotation until a consensus is reached. This approach further allows other annotators to align to the consensus for the remaining annotations.", + "bbox": [ + 173, + 279, + 826, + 460 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.3 LLM Judges", + "text_level": 1, + "bbox": [ + 171, + 474, + 308, + 489 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "**Prompts** We use simple system prompt (Figure 5) and user message (Figure 6) templates without model-specific commands, allowing our prompt to be transferred to any LLM. 
We use distinct tags, such as and , to facilitate parsing the model output.", + "bbox": [ + 169, + 500, + 826, + 544 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Results We report extended results for 10 LLM judges, with the overall results in Table 7 and the finegrained results in Table 8 over all agents; the unaggregated results are presented in Tables 9 to 12.", + "bbox": [ + 169, + 550, + 826, + 594 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 109, + 823, + 354 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg", + "image_caption": [ + "Figure 4: User Interface used by annotators for answering questions" + ], + "image_footnote": [], + "bbox": [ + 173, + 354, + 506, + 583 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 354, + 823, + 583 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/79bcf03814eb760d5fc8e0eb52317dc44bb68f0d0eb22fcbf1d8db118ab2505d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Domain#Domain#Domain#
duckduckgo.com129google.com112wizards.com24
blackbaudhosting.com21fedex.com17mtggoldfish.com17
usps.com12fidelity.ca12weather.gov10
yelp.com9linkedin.com9rottentomatoes.com9
nih.gov8tcgplayer.com8imdb.com8
yahoo.com7cagreatamerica.com7thedrinknation.com6
tripadvisor.com6express.dhl6californiagreatamerica.com5
seattlechildrensmuseum.org5monday.com5fubo.tv5
philamuseum.org5weatherspark.com5bing.com5
ensembl.org4wellhub.com4hubbioo.com3
wholefoodsmarket.com3alltrails.com3target.com2
andersmartialarts.com2wikipedia.org2sfyimby.com2
currentresults.com2stockanalysis.com2speakrj.com2
x.com2apple.com2extremeweatherwatch.com2
tmplclubs.com2sixflags.com1etf.com1
amazon.com1netflixreleases.com1weather-and-climate.com1
wunderground.com1redfin.com1talesofamountainmama.com1
themeparkcenter.com1seattleweatherblog.com1chromewebdata1
peacefoodnyc.com1sec.gov1calicolabs.com1
easyship.com1onlineshippingcalculator.com1tripadvisor.ca1
nyunews.com1fandango.com1aimobo.io1
anytots.com1morningstar.com1visitphilly.com1
", + "bbox": [ + 197, + 637, + 799, + 883 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 4: AssistantBench Website Visit Counts", + "bbox": [ + 346, + 896, + 650, + 909 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/7f48a6d6dffbee0c753c2cd9ee9598cf1c13ea70fb6596793215d0a51a81fe4d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ValueFlags
Trueuseifax, use_som, use-thinking, use_concrete_example, use_ABSTRACT_example, use_hints, be_cautious
Falseuse_html, usepast_error_logs, use Think_history, use_diff, filter Visible_elements_only, long_description, individual/examples, use_plan, use_criticise, use_memory, enable chatting
", + "bbox": [ + 197, + 112, + 799, + 205 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/3fcd6d6c6c53ef5d655d6e0ff13f9bfff714b04e8a95435c83bca1a6b39cc8ae.jpg", + "table_caption": [ + "Table 5: Agentlab Hyperparameters" + ], + "table_footnote": [], + "table_body": "
BenchmarkAgentExpertLLM JudgeRule-based
AssistantBenchClaude 3.7 S.11.111.10.8
GPT-4o14.814.83.7
Llama 3.33.77.45.3
Qwen2.5-VL0.00.02.2
WebArenaClaude 3.7 S.55.164.130.8
GPT-4o42.350.025.6
Llama 3.322.427.618.4
Qwen2.5-VL33.352.629.5
VisualWebArenaClaude 3.7 S.28.334.823.9
GPT-4o35.947.817.4
Qwen2.5-VL21.734.817.4
WorkArenaClaude 3.7 S.68.868.850.0
GPT-4o50.056.250.0
Llama 3.356.250.056.2
Qwen2.5-VL56.256.256.2
WorkArena++Claude 3.7 S.18.420.78.1
GPT-4o18.411.54.6
Llama 3.39.25.83.5
Qwen2.5-VL13.814.911.5
OverallClaude 3.7 S.33.038.020.4
GPT-4o31.335.316.3
Llama 3.317.017.513.3
Qwen2.5-VL22.331.719.5
", + "bbox": [ + 225, + 260, + 772, + 609 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/3b3b523d52ea3d449263831d809c2bca3078fa57ca50ab960a8cb6a967fc27a2.jpg", + "table_caption": [ + "Table 6: Success Rate by evaluation type. For the LLM judge, we use GPT-4o with accessibility trees." + ], + "table_footnote": [], + "table_body": "
JudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AER-C67.771.969.7------
AER-V67.671.569.5------
Claude 3.7 S. (A)68.881.674.714.034.720.082.894.988.4
Claude 3.7 S. (S)69.476.372.714.144.421.482.094.587.8
Functional83.855.967.1------
GPT-4o (A)69.883.175.97.791.714.280.496.987.9
GPT-4o (S)68.180.373.77.590.313.879.296.286.9
GPT-4o Mini (A)61.586.171.77.270.813.078.646.458.3
GPT-4o Mini (S)64.578.370.86.631.911.092.318.530.8
Llama 3.3 (A)67.779.072.96.979.212.780.191.685.5
NNetNav52.582.464.1------
Qwen2.5-VL (A)64.389.875.09.055.615.488.172.679.6
Qwen2.5-VL (S)64.586.173.78.858.315.288.764.674.7
", + "bbox": [ + 197, + 665, + 802, + 871 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 7: Results over all benchmarks by judge. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 169, + 880, + 823, + 910 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/55cf8ed31386a867855e3097e6c2a90acd7ed00a9c2e4344bc24ab3f9d7afdf9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "bbox": [ + 187, + 128, + 808, + 843 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 8: Finegrained results by benchmark and judge for all agents. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 171, + 867, + 823, + 895 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/77007d4343c7aab2936fe569092fef7ca49b6622819e1c83f346774f57a1305e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "bbox": [ + 187, + 127, + 810, + 845 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 9: Finegrained results by benchmark and judge for Qwen2.5-VL agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 171, + 867, + 823, + 895 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/7f2421a723f50fa8df7200781d075cb0c69a98bd3470aca8dc2f648844c87aab.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "bbox": [ + 187, + 128, + 808, + 843 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 10: Finegrained results by benchmark and judge for Llama 3.3 agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 171, + 867, + 823, + 895 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/3638d2893524c1d751349fe73edd5030d584b750e19926704cc9ef9bb7bf8899.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "bbox": [ + 187, + 127, + 810, + 845 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 11: Finegrained results by benchmark and judge for Claude 3.7 Sonnet agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 171, + 866, + 823, + 895 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/4863bb5ba625a6f6e63f09af52fe2c622bd6c22434726514333cd02a82329430.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "bbox": [ + 187, + 128, + 808, + 843 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 12: Finegrained results by benchmark and judge for GPT-4o agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics.", + "bbox": [ + 171, + 866, + 823, + 893 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "System Prompt Template", + "text_level": 1, + "bbox": [ + 197, + 109, + 366, + 125 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "You are a Language Model specialized in judging the performance of web agents in web navigation tasks. For a certain website, you are given the goal of a navigation task, the current URL of the webpage, the actions taken by the agent, and the thought process of the agent. Additionally, you will have access to the first and last screenshots, as well as the last accessibility tree. Your task is to answer several questions about the agent's performance in the task.", + "bbox": [ + 197, + 136, + 797, + 213 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question 1: Was the sequence of actions below successful in achieving the goal? \nChoices: Successful, Unsuccessful", + "bbox": [ + 197, + 224, + 781, + 250 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question 2: Did the agent perform unnecessary actions that could lead to unintended side effects? \nChoices: Yes, No", + "bbox": [ + 197, + 262, + 797, + 300 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question 3: Did the agent perform the task optimally, by only performing necessary actions and avoiding unnecessary ones?", + "bbox": [ + 197, + 311, + 797, + 339 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Choices: 1. Complete Failure, 2. Suboptimal, 3. Somewhat Optimal, 4. 
Completely Optimal", + "bbox": [ + 197, + 349, + 795, + 376 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Question 4: Did the agent loop through a sequence of actions that did not make progress towards the goal? \nChoices: Yes, No", + "bbox": [ + 197, + 388, + 774, + 426 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "You should provide your reasoning process for each question, and you can refer to the screenshots and the accessibility tree to support your answers. Your answer must follow the following format:", + "bbox": [ + 197, + 438, + 795, + 476 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "your reasoning here answer answer answer answer", + "bbox": [ + 197, + 488, + 509, + 551 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 5: System Prompt Template used for the simplified judge", + "bbox": [ + 284, + 580, + 712, + 597 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "User Prompt Template", + "text_level": 1, + "bbox": [ + 197, + 616, + 366, + 633 + ], + "page_idx": 22 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "The user goal is: {goal} \nThe agent performed the following actions: \nStep: {step_number} \nURL: {url} \nAction: {action} \nReasoning: {reasoning} \n----- \nThe last accessibility tree is: \n{axtree} \nHere is the screenshot of the last step. 
\n{screenshot} \nProvide your reasoning and answer the four questions from the system prompt, using \nthe specified format.", + "guess_lang": "txt", + "bbox": [ + 197, + 643, + 799, + 873 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 6: User Prompt Template used for the simplified judge", + "bbox": [ + 292, + 902, + 702, + 917 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_model.json b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4ee0f5e9cde396f8890c5b44435ab31f6d9f99e7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_model.json @@ -0,0 +1,2457 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.058, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.08942v2 [cs.LG] 6 Oct 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.141 + ], + "angle": 0, + "content": "AGENTREWARDBENCH: Evaluating Automatic Evaluations of Web Agent Trajectories" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.166, + 0.528, + 0.183 + ], + "angle": 0, + "content": "Xing Han Lu12 Amirhossein Kazemnejad*2" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.183, + 0.767, + 0.198 + ], + "angle": 0, + "content": "Nicholas Meade12 Arkil Patel12 Dongchan Shin2 Alejandra Zambrano2" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.198, + 0.77, + 0.214 + ], + "angle": 0, + "content": "Karolina Stanczak12 Peter Shaw4 Christopher J. 
Pal2567 Siva Reddy1257" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.214, + 0.812, + 0.23 + ], + "angle": 0, + "content": "*Core contributor ¹McGill University ²Mila Quebec AI Institute ⁴Google DeepMind" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.23, + 0.739, + 0.244 + ], + "angle": 0, + "content": "5Canada CIFAR AI Chair 6Polytechnique Montréal 7ServiceNow Research" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.244, + 0.588, + 0.258 + ], + "angle": 0, + "content": "xing.han.lu@mail.mcgill.ca; siva.reddy@mila.quebec" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.294, + 0.54, + 0.31 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.327, + 0.77, + 0.633 + ], + "angle": 0, + "content": "Web agents enable users to perform tasks on web browsers through natural language interaction. Evaluating web agents trajectories is an important problem, since it helps us determine whether the agent successfully completed the tasks. Rule-based methods are widely used for this purpose, but they are challenging to extend to new tasks and may not always recognize successful trajectories. We may achieve higher accuracy through human evaluation, but the process would be substantially slower and more expensive. Automatic evaluations with LLMs may avoid the challenges of designing new rules and manually annotating trajectories, enabling faster and cost-effective evaluation. However, it is unclear how effective they are at evaluating web agents. To this end, we propose AGENTREWARD-BENCH, the first benchmark to assess the effectiveness of LLM judges for evaluating web agents. AGENTREWARD-BENCH contains 1302 trajectories across 5 benchmarks and 4 LLMs. Each trajectory in AGENTREWARD-BENCH is reviewed by an expert, who answers questions pertaining to the success, side effects, and repetitiveness of the agent. Using our benchmark, we evaluate 12 LLM judges and find that no single LLM excels across all benchmarks. 
We also find that the rule-based evaluation used by common benchmarks tends to underreport the success rate of web agents, highlighting a key weakness of rule-based evaluation and the need to develop more flexible automatic evaluations. We release the benchmark at: https://agent-reward-bench.github.io" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.665, + 0.32, + 0.68 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.694, + 0.828, + 0.794 + ], + "angle": 0, + "content": "Giving a Large Language Model (LLM) access to a web browser unlocks an entirely new capability paradigm: beyond interacting with a user through a chat interface, such models can interact with the online world to complete tasks similar to how a human would. The promise of a new paradigm has motivated the design of LLMs to control interfaces such as web browsers, starting from earlier foundation models such as ACT-1 (Adept, 2022) to the more recent OpenAI Operator (OpenAI, 2025) and Claude Computer use (Anthropic, 2024a), showing promising results in real-world tasks (Zhou et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.828, + 0.926 + ], + "angle": 0, + "content": "To measure the progress of web agents, a well-designed benchmark should compile a collection of realistic tasks across diverse websites. As illustrated in Figure 1, a user may ask the agent to locate a Classifieds listing for a Google Pixel phone and submit an offer via a comment. Inside a dedicated environment (e.g., a self-hosted Classifieds site), the web agent would complete the task by filling the search bar, identifying the correct listing, and writing a comment to show interest in purchasing the item. To determine if the agent successfully completed the request, we need to automatically evaluate the agent's chosen actions – known as trajectories – using a set of rules uniquely designed for the task of finding a Pixel phone on Classifieds. 
As expected, rule-based evaluation is time-consuming for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.186, + 0.103, + 0.813, + 0.341 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.352, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Figure 1: Example from AGENTREWARDBENCH, where an LLM judge evaluates a web agent's trajectory on VisualWebArena (Koh et al., 2024). The benchmark compares judgments against expert annotations to determine the effectiveness of the judge for evaluating web agents." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.43, + 0.827, + 0.502 + ], + "angle": 0, + "content": "experts to design, and may not cover every successful scenario (e.g., what if the agent finds a different but valid listing?). It is also possible for an expert to annotate the trajectories, but it would be slow and expensive to scale across many web agents. This brings us to the following questions: Given a web agent trajectory, can an LLM decide if it is successful? If so, how do we determine which LLM is the most capable at evaluating web agents?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.507, + 0.828, + 0.621 + ], + "angle": 0, + "content": "Past works have shown that LLMs can be used as judges to evaluate the output of LLM chatbots (Zheng et al., 2023). More recently, LLM judges have been used for automatically evaluating trajectories from web agents (Pan et al., 2024; Murty et al., 2025; Trabucco et al., 2025). With highly accurate automatic evaluation methods, we can measure the progress of web agents on new sets of tasks, use them to synthesize trajectories for finetuning smaller models, and design reward models that can be used in a reinforcement learning (RL) setting. 
However, it remains unclear whether current automatic evaluators, whether rule-based or LLM-based, can predict the success of a trajectory in a way that reflects expert judgment." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.625, + 0.828, + 0.821 + ], + "angle": 0, + "content": "To address this problem, we introduce AGENTREWARDBENCH (§3), a benchmark for determining the capability of an LLM at evaluating web agents (see Figure 1). It consists of 1300 trajectories produced by 4 popular LLM agents on 5 diverse web environments, ranging from common tasks like online shopping and posting on a forum, to highly specialized requests in professional environments, such as updating task schedules on IT task management platforms. Each trajectory is labeled by expert annotators to determine whether the agent successfully completed the task, caused unintended side effects, or entered cycles of repetitive actions. Using this benchmark, we evaluate both existing and novel LLM judges (§4) alongside rule-based evaluation. We find that rule-based methods, which are used as the official automatic evaluation by environment-based benchmarks, severely underestimate the capabilities of agents and do not reflect how experts define success (§5). We further provide an in-depth analysis (§6) that highlights the weaknesses of existing LLMs when used as judges. Overall, we believe AGENTREWARDBENCH can be used to enable better automatic evaluation and reward modeling for web agents." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.838, + 0.338, + 0.854 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Web Agents and Environments Designing agents that can automatically navigate user interfaces has been a long standing problem; earlier approaches employed program-based heuristics (St. 
Amant & Zettlemoyer, 2000), whereas later works on web navigation focus on training reinforcement learning (RL) models (Gur et al., 2018; Humphreys et al., 2022)," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.204 + ], + "angle": 0, + "content": "language models (Nakano et al., 2021; Gur et al., 2023; Deng et al., 2023) and multimodal models (Shaw et al., 2023; Lu et al., 2024; Zheng et al., 2024). To measure the advancements in web agents, various benchmarks have been proposed, with initial works proposing simplified environments (Shi et al., 2017; Liu et al., 2018) and subsequent iterations focusing on specific tasks like web shopping (Yao et al., 2022). More recent benchmarks focus on designing realistic environments that cover commonly used websites (Zhou et al., 2024; Koh et al., 2024) as well as specialized environments (Drouin et al., 2024; Boisvert et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.21, + 0.827, + 0.435 + ], + "angle": 0, + "content": "LLM Judges Zheng et al. (2023) proposed using LLMs to predict human preferences of dialogue completion for chat models. They show that a GPT-4-based judge achieves over \\(80\\%\\) agreement with human votes on the task of selecting better completions between models pairs. Follow-up extends this framework to new modalities (Chen et al., 2024), metrics (Feizi et al., 2025) and coding agents (Zhuge et al., 2024); the latter, Agent-as-a-Judge, leverages intermediate feedback from the environment. He et al. (2024) extend the idea by using LLMs to judge trajectories from web agents, allowing them to determine task completion without human annotators, resulting in a high correlation with humans on a private subset of trajectories. To determine the quality of automatic judgments, Pan et al. 
(2024) evaluate four LLM judges using trajectories from a GPT-4 agent on WebArena tasks, and find that the best judge achieves \\(80.6\\%\\) accuracy against the rule-based evaluator from WebArena. Unlike prior works on LLM judges, we design AGENTREWARDBENCH with trajectories from several LLM agents on diverse web benchmarks, where each one is annotated by human experts on multiple dimensions. By following a human-focused approach similar to Lambert et al. (2024), we ensure that LLM judges are evaluated against expert preferences on a wide range of scenarios." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.442, + 0.828, + 0.598 + ], + "angle": 0, + "content": "Trajectory Synthesis Leveraging web environments that can be created and reset without real-world impact, recent works started to explore generating trajectories without human supervision. Leveraging LLM judges and LLM-generated tasks, trajectory synthesis can be used to bootstrap agent-judge training loops (Murty et al., 2024; 2025), to create contrastive pairs (Putta et al., 2024) for direct preference optimization (Rafailov et al., 2023), or as training data to finetune a base model (Lai et al., 2024; Patel et al., 2024; Trabucco et al., 2025). Although all the methods leverage an LLM judge, they lack a clear way of directly determining the quality of judgments, instead relying on the downstream performance improvement to validate their approach. To this end, AGENTREWARDBENCH enables researchers to choose the most appropriate LLM judge for a category of web tasks based on their effectiveness at evaluating web agents." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.615, + 0.418, + 0.63 + ], + "angle": 0, + "content": "3 AGENTREWARDBENCH" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.644, + 0.827, + 0.756 + ], + "angle": 0, + "content": "In this work, we introduce AGENTREWARDBENCH, a benchmark designed to assess the capabilities of LLM judges for evaluating web agents (§3.1). 
We curate 5 diverse web environments and tasks (§3.2) in order to collect trajectories from web agents based on 4 LLMs (§3.3). For each trajectory, a team of expert annotators carefully reviews the screenshots, actions, and the agent's reasoning chains before labeling them as either successful or unsuccessful, alongside other auxiliary labels (see Figure 2). Finally, we evaluate LLM judges (Table 1) by comparing their predictions with expert annotations to determine their effectiveness for automatic evaluation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.77, + 0.391, + 0.784 + ], + "angle": 0, + "content": "3.1 Assessment Framework" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.795, + 0.827, + 0.922 + ], + "angle": 0, + "content": "Trajectory Definition Let \\( o_i \\) be an observation of a browser at time step \\( i \\), \\( a_i \\) be an action that can be executed on a webpage through a browser navigation engine \\( B \\) such that \\( o_{i+1} = B(o_i, a_i) \\), and \\( r_i \\) be the reasoning for choosing the action. We define a web agent trajectory as the sequence \\( \\mathcal{T} = \\{o_1, (r_1, a_1), o_2, (r_2, a_2), \\ldots, o_{n-1}, (r_{n-1}, a_{n-1}), o_n\\} \\) where \\( o_n \\) is the final observation in the trajectory. Each observation contains a screenshot of the browser \\( s_i \\), the Document Object Model (DOM) tree representation of the browser, and an accessibility (A11Y) tree rendered from the DOM tree. For the observation to be useful for an LLM agent, we need a representation function \\( R \\) that produces \\( p_i = R(o_1, r_1, a_1, \\ldots, o_i) \\), which can be used as an input for an LLM. 
If the agent is multimodal, \\( o_i \\) would include" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.186, + 0.102, + 0.813, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.249, + 0.825, + 0.302 + ], + "angle": 0, + "content": "Figure 2: AGENTREWARDBENCH creation process. We first collect trajectories from LLM agents inside web environments using instructions from several benchmarks. Then, the trajectories are reviewed by expert annotators, who indicate if the trajectory is successful, led to side effects, and contains repetition cycles. Finally, we use the annotated trajectories to evaluate LLM judges." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.341, + 0.825, + 0.399 + ], + "angle": 0, + "content": "screenshots; otherwise, it would be a textual representation of the page (e.g., accessibility tree). Then, \\( p_i \\) is given to a language model to produce a completion \\( c_i = \\mathrm{LM}(p_i) \\), or \\( c_i = \\mathrm{VLM}(p_i, s_i) \\) in the case of a multimodal LLM. The completion is parsed by an execution function \\( E \\) to produce \\( (a_i, r_i) = E(c_i) \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Annotation Design For each trajectory, an expert annotator reviews a goal \\( g \\) and sequence \\( \\{s_1, (r_1, a_1), \\ldots, s_{n-1}, (r_{n-1}, a_{n-1}), s_n\\} \\) in order to answer questions \\( \\mathcal{Q} = \\{q_1, \\ldots, q_m\\} \\). We consider the answers produced, \\( \\mathcal{A}^* = \\{a_1^*, \\ldots, a_m^*\\} \\), as the ground truth annotations for the trajectory, which indicate whether the agent successfully completed \\( g \\). 
To collect \\( \\mathcal{A}^* \\), we use the following \\( \\mathcal{Q} \\) in the annotation guidelines:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.495, + 0.727, + 0.511 + ], + "angle": 0, + "content": "1. Success: Was the sequence of actions successful in achieving the goal?" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.527, + 0.825, + 0.556 + ], + "angle": 0, + "content": "2. Side Effect: Did the agent perform unnecessary actions that could lead to unintended side effects?" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.574, + 0.825, + 0.604 + ], + "angle": 0, + "content": "3. Repetition Cycle: Did the agent loop through a sequence of actions that did not make progress towards the goal?" + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.495, + 0.825, + 0.604 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.621, + 0.825, + 0.718 + ], + "angle": 0, + "content": "Agreement with respect to success is the primary criterion with which we evaluate LLM judges. The remaining can be useful as auxiliary criteria for detecting issues ahead of time. For example, if an agent purchases several irrelevant products when the user only requested one, then the trajectory would be flagged for side effects, independent of task success. A judge can also indicate the presence of a cycle, for example, if the agent repeatedly clicks on a disabled button. Both signals can be used to penalize the agent during training or steer it to another action at inference." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.728, + 0.825, + 0.812 + ], + "angle": 0, + "content": "Annotation Setup The team of annotators consisted of 6 experts with a deep understanding of the tasks and environments through their research on web agents. They used a custom-built user interface that displays each trajectory with screenshots, actions, and reasoning. 
Rating disagreements were resolved by annotators discussing among themselves until clear annotations can be produced for ambiguous trajectories. Moreover, the annotators also have access to the environment and accessibility trees when screenshots are insufficient." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Judge Model Given a goal \\( g \\), trajectory \\( \\mathcal{T} \\) and questions \\( \\mathcal{Q} \\), a judge model returns a judgment \\( \\hat{\\mathcal{A}} \\), which is an estimate of \\( \\mathcal{A}^* \\). We can use \\( \\hat{\\mathcal{A}} \\) to derive a reward in RL or to automatically evaluate web agents when \\( \\mathcal{A}^* \\) is unavailable. To implement the judge, we need a judge-specific function \\( R_j \\) that produces a representation of the trajectory, \\( p = R_j(o_1, r_1, a_1, \\ldots, o_n) \\). \\( R_j = \\) can vary substantially, ranging from a simple list of actions \\( a_1, \\ldots, a_{n-1} \\), to using another LLM to process the observation history. We describe judges used in previous works and introduce a simplified judge in Section 4 and provide supplementary details in Section A.3." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.396, + 0.117 + ], + "angle": 0, + "content": "3.2 Tasks and Environments" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.129, + 0.827, + 0.2 + ], + "angle": 0, + "content": "We select 5 benchmarks designed to evaluate web agents inside dedicated environments and real websites, including general-purpose (Zhou et al., 2024), vision-focused (Koh et al., 2024), real-world (Yoran et al., 2024), and enterprise-oriented (Drouin et al., 2024; Boisvert et al., 2025) tasks. 
In total, we curate 351 unique tasks across 8 environments and 66 websites, which we separate into 51 development and 300 test tasks (details in Section A.1)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.208, + 0.828, + 0.295 + ], + "angle": 0, + "content": "WebArena (WA; Zhou et al. 2024) This benchmark comprises 6 self-hosted websites covering a wide range of domains: customer relationship management, map navigation, online encyclopedia, shopping site, social forum, and software development collaboration platform. Each environment is derived from real open-source projects that develop self-hosted environments for both commercial and personal usage. Each task consists of a textual goal that requires a good understanding of one or multiple environments to complete." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.302, + 0.826, + 0.388 + ], + "angle": 0, + "content": "VisualWebArena (VWA; Koh et al. 2024) To complement WebArena's text-based goals, we also include VisualWebArena (VWA), a benchmark focusing on tasks that require visual reasoning to complete. For instance, a user may include an image alongside the goal, or the task could be designed to only be solved if the agent selects an item with a unique visual characteristic (e.g., purchasing a TV with the widest bezel). VWA also introduces a new online marketplace environment (Classifieds)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.394, + 0.825, + 0.452 + ], + "angle": 0, + "content": "AssistantBench (AB; Yoran et al. 2024) In addition to the self-hosted environments, we consider trajectories resulting from agent execution on real-world websites. This benchmark defines tasks that require navigating the internet, starting from a search engine. Since the test set is private, we use the validation set, which consists of 33 unique tasks." + }, + { + "type": "title", + "bbox": [ + 0.17, + 0.46, + 0.826, + 0.474 + ], + "angle": 0, + "content": "WorkArena (Work; Drouin et al. 
2024) and WorkArena++ (Wk++; Boisvert et al. 2025)" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.474, + 0.826, + 0.575 + ], + "angle": 0, + "content": "To increase the diversity of tasks relevant to professional environments, we incorporate WorkArena (Boisvert et al., 2025), a benchmark of 18 basic tasks on ServiceNow, a software-as-a-service platform for professional workflows in the information technology (IT), human resources, and customer management domains. WorkArena++ introduces tasks with greater complexity, requiring planning and reasoning to correctly complete multiple sub-tasks. Including this alongside WorkArena allows us to evaluate judges on a wider range of task difficulty. We focus on the Level 2 tasks since Level 3 is too challenging for current agents." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.588, + 0.362, + 0.604 + ], + "angle": 0, + "content": "3.3 Web Agents Design" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.613, + 0.825, + 0.644 + ], + "angle": 0, + "content": "To collect trajectories on the 5 benchmarks, we design web agents using two models from major commercial providers and two open-weight LLMs." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.653, + 0.827, + 0.78 + ], + "angle": 0, + "content": "LLM backbones On the commercial side, we use OpenAI's \\(GPT-4o^2\\) (Hurst et al., 2024) and Anthropic's Claude 3.7 Sonnet (Anthropic, 2024b). They are the flagship models of their respective providers, both of which offer computer-use agents powered by their LLMs, namely OpenAI Operator (OpenAI, 2025) and Anthropic Claude's Computer use (Anthropic, 2024a). We select two leading open-weights LLMs to complement the commercial LLMs: Llama-3.3-70B (Grattafori et al., 2024) and Qwen2.5-VL (Bai et al., 2025). In both cases, we choose the instruction-tuned variant, which have undergone post-training for tool-use or UI navigation. 
Moreover, since Llama-3.3 is a text-only model, it was excluded from VisualWebArena, which requires image-based reasoning." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.788, + 0.827, + 0.845 + ], + "angle": 0, + "content": "Agent Platform By default, each LLM backbone receives an input processed by a representation function \\( R \\) and generates a completion \\( c_{i} \\). Then, \\( c_{i} \\) is interpreted as an action by an execution function \\( E \\). To implement \\( E \\), we use AgentLab and BrowserGym (Chezelles et al., 2025), an ecosystem for designing web agents using LLMs (details in Section A.1)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.853, + 0.827, + 0.883 + ], + "angle": 0, + "content": "Trajectory Annotations and Splits We collect a total of 1302 trajectories from our 4 LLM-based web agents across five benchmarks. Based on the task split (§3.2), 196 trajectories are" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.896, + 0.44, + 0.91 + ], + "angle": 0, + "content": "1https://developer.servicenow.com" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.453, + 0.924 + ], + "angle": 0, + "content": "2We use the version gpt-4o-2024-11-20" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.896, + 0.453, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.18, + 0.102, + 0.822, + 0.327 + ], + "angle": 0, + "content": "
CategoryJudgeOverallABVWAWA PrecisionWorkWk++
PrecisionRecallF1
OfficialRule-based*83.855.967.125.085.279.0100.083.3
ExistingAER-C67.771.969.783.356.068.8100.066.7
AER-V67.671.569.583.361.267.696.459.3
NNetNav52.582.464.120.854.554.377.343.2
Ours (A)Claude 3.7 S.68.881.674.787.561.069.385.066.7
GPT-4o69.883.175.977.863.070.294.663.0
GPT-4o Mini61.586.171.780.057.963.584.249.4
Llama 3.367.779.072.975.059.668.294.362.7
Qwen2.5-VL64.389.875.072.759.363.687.260.3
Ours (S)Claude 3.7 S.69.476.372.771.464.869.385.366.7
GPT-4o68.180.373.777.860.769.993.859.6
GPT-4o Mini64.578.370.880.057.466.990.354.8
Qwen2.5-VL64.586.173.770.058.562.993.864.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.337, + 0.825, + 0.378 + ], + "angle": 0, + "content": "Table 1: Judge performance for predicting success, measured with precision (§4.2). We report recall and F1 as auxiliary scores. We examine two variants of the simplified judge: one with the final accessibility tree (A), and the other with the final screenshot (S). *Rule-based evaluation are included for reference." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.416, + 0.827, + 0.49 + ], + "angle": 0, + "content": "in the development split and 1106 are in the test split (details in A.2). The annotators follow the process described in Section 3.1 to label all trajectories, producing a total of 3906 binary annotations. To assess agreement between annotators, we annotated the GPT-4o agent's trajectory on WebArena with a second annotator. We obtained an inter-annotator agreement of \\(89.3\\%\\) on success, indicating a high level of consistency among annotators." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.505, + 0.441, + 0.525 + ], + "angle": 0, + "content": "4 LLM judges for web tasks" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.536, + 0.391, + 0.552 + ], + "angle": 0, + "content": "4.1 Judge implementations" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.561, + 0.827, + 0.675 + ], + "angle": 0, + "content": "We consider two existing implementations of LLM judges for web agents, Agent Eval Refine (AER; Pan et al. 2024) and NNetNav (Murty et al., 2025), and introduce a simplified judge that simultaneously predicts success, side effects, and repetition. In Agent-as-a-Judge (Zhuge et al., 2024), the method assumes the judge can interact with the environment after the agent finishes executing actions, which isn't feasible when the environment state cannot be preserved or shareable across agents. 
Other LLM judge variants were proposed (He et al., 2024; Putta et al., 2024; Lai et al., 2024; Trabucco et al., 2025), but our three judge implementations cover major strategies for representing trajectories." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.682, + 0.828, + 0.768 + ], + "angle": 0, + "content": "AER (Pan et al., 2024) The judge in this framework takes as input the sequence of agent thoughts and actions alongside the final browser state, which is either passed to a vision-enabled model as a screenshot (AER-V) or as a caption generated by a captioner model (AER-C). Then, the judge outputs its reasoning before predicting success or failure. For both the judge and captioner, we implement this method using GPT-4o, which is an overall stronger model than the GPT-4 (Achiam et al., 2023) model originally used." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.775, + 0.828, + 0.86 + ], + "angle": 0, + "content": "NNNetNav (Murty et al., 2025) In this work, a Llama 3.1 70B judge receives a summary of changes across all observations and has to give a rating between 1 (worst) and 5 (best) after providing the thought process; the rating is binarized by thresholding at 4, based on the original implementation. To generate summaries, an LLM is used to describe the change between two observations based on the accessibility trees instead of screenshots. We use Llama 3.3 70B (Al-Dahle, 2024), an improved version of the original backbone." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Simplified judge (ours) We propose a simplified design for our judge. First, it directly answers the three questions asked to the annotators. This allows it to return multiple labels within a single completion. 
Then, we decouple the system prompt and reasoning chain from the final state representation, allowing the judge to receive either the accessibility tree or the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.207, + 0.102, + 0.79, + 0.193 + ], + "angle": 0, + "content": "
A11YScreenSuccessSide EffectRepetition
PRF1PRF1PRF1
62.181.770.66.531.910.892.516.828.4
X61.586.171.77.270.813.078.646.458.3
X64.578.370.86.631.911.092.318.530.8
XX60.773.966.77.276.413.278.159.167.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.203, + 0.825, + 0.232 + ], + "angle": 0, + "content": "Table 2: Ablation study of our GPT-4o mini judge, measured in precision (P), recall (R), and F1. We consider how including accessibility trees and screenshots in the input affects the predictions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.259, + 0.825, + 0.318 + ], + "angle": 0, + "content": "screenshot. This differs from AER, which requires a vision-enabled model, and NNetNav, which requires a long-context model capable of receiving multiple accessibility trees. Our method is compatible with both multimodal and text-only LLMs and does not require a separate LLM to caption the screenshot or summarize changes across observations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.33, + 0.295, + 0.344 + ], + "angle": 0, + "content": "4.2 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.355, + 0.827, + 0.482 + ], + "angle": 0, + "content": "To evaluate LLM judges, we use the precision score, which is the ratio of true positives over all predicted positives (true + false positives). The metric is a good fit for rejection finetuning (RFT), where we are interested in increasing the number of true positives (actual successful trajectories) while reducing the number of false positives (failed trajectories added to the dataset due to poor LLM judgments). For reward modeling, we also want to prioritize true positives since they are the primary signals for many RL algorithms, while false positives should be minimized to avoid introducing noise to the loss function. Moreover, recall and F1 benefit from minimizing false negatives, which is useful for improving sample efficiency by reducing the number of valid trajectories removed; we report them as auxiliary metrics." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.495, + 0.356, + 0.51 + ], + "angle": 0, + "content": "4.3 Judge Performance" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.52, + 0.825, + 0.604 + ], + "angle": 0, + "content": "In Table 1, we provide an overview of the performance of judges across benchmarks using the metrics defined in Section 4.2. We find that GPT-40 and Claude 3.7 Sonnet-based simplified judges achieve higher precision compared to prior approaches, indicating that removing the internal LLMs for captioning or summarizing changes does not hinder their capabilities. Notably, no judge consistently stands out across benchmarks, highlighting the importance of selecting an appropriate LLM backbone based on the nature of the task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.613, + 0.827, + 0.725 + ], + "angle": 0, + "content": "Low precision limits existing judges We notice that no judge achieves above \\(70\\%\\) precision, which means that \\(30\\%\\) of trajectories are erroneously marked as successful. This severely limits the usefulness of the judges for downstream applications, such as using the filtered trajectories for finetuning an agent, as the agent will learn to generate incorrect trajectories for a substantial portion of the tasks. This indicates LLM judges are currently not a reliable way of assessing the true capabilities of agents. Consequently, judges will need to achieve higher precision before they are useful for automatic evaluation, which also affects their downstream utility for methods like RFT and RL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.733, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Official rule-based evaluation underestimates success Similar to LLM judges, the rule-based evaluation used by benchmarks can be compared with expert annotations. Since they use task-specific configurations to determine success, they may reject successful trajectories due to inconsequential differences. 
For instance, in WebArena, if a user asks \"What's the closest national park to the largest city in Maine?\", the agent may reply: \"The closest national park to Portland [...] is Acadia National Park\". Rule-based evaluation considers it unsuccessful since the configuration requires it to exactly match \"Acadia National Park\". As a result, the rule-based approach achieves a recall of \\(55.9\\%\\), indicating a higher rate of false negatives compared to LLM judges. Overall, a substantial precision gap exists between rule-based methods and LLM judges, but rule-based methods severely underestimate the true performance of web agents, highlighting the need for more flexible automatic evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Impact of Input Representation Browser screenshots represent an intuitive state for humans, but LLMs may need more than vision alone, as screenshots miss page structure" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.182, + 0.102, + 0.816, + 0.194 + ], + "angle": 0, + "content": "
AgentHumanGPT-4o JudgeRule-based
VWAWAWk++VWAWAWk++VWAWAWk++
Claude 3.7 S.28.355.118.434.864.120.723.930.88.1
GPT-4o35.942.318.447.850.011.517.425.64.6
Llama 3.30.022.49.20.027.65.80.018.43.5
Qwen2.5-VL21.733.313.834.852.614.917.429.511.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.204, + 0.825, + 0.232 + ], + "angle": 0, + "content": "Table 3: Success Rate of web agents measured by expert annotators, GPT-4o Judge (with accessibility tree) and rule-based evaluation on various benchmarks (§3.2). Results by environment are in Table 6." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.268, + 0.827, + 0.354 + ], + "angle": 0, + "content": "and hidden attributes found in accessibility trees. To investigate the impact of different representations, we ablate our GPT-4o-mini simplified judge in Table 2. We observe that only including screenshots achieves a high precision for success and repetition, whereas only including accessibility trees allows higher recall. Surprisingly, including both accessibility trees and screenshots yields a lower performance than including only the screenshot, indicating that more information distracts rather than assists the judge." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.371, + 0.604, + 0.389 + ], + "angle": 0, + "content": "5 Revisiting how we evaluate task success rate" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.401, + 0.827, + 0.485 + ], + "angle": 0, + "content": "One of the core applications of LLM judges is to estimate the success rate on a web navigation benchmark, which is useful in scenarios where there are no dedicated functions to calculate the rule-based success rate, which is the standard evaluation for many web agent benchmarks. However, rule-based approaches may not always agree with experts. In Table 3, we compare the success rate calculated from expert annotations, rule-based evaluation, and a GPT-4o judge with accessibility trees." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.494, + 0.827, + 0.662 + ], + "angle": 0, + "content": "Rule-based evaluation does not reflect expert-defined success rates We notice a stark difference between the judge and rule-based approach: whereas the LLM judge tends to overestimate the success rate of every agent (with two exceptions in WorkArena++) rule-based methods consistently underestimate it. Moreover, the underestimation varies substantially, with the performance of GPT-4o being \\(16.7\\%\\) lower on WebArena and \\(18.5\\%\\) lower on VWA compared to expert annotations. This highlights a major discrepancy between the official task success rate reported by rule-based methods and the success rate according to expert annotators. For instance, rule-based evaluation ranks Qwen2.5-VL above GPT-4o on WebArena and WorkArena++ (and equally on VWA), whereas expert annotators prefer GPT-4o over Qwen2.5-VL on all benchmarks, with over \\(14\\%\\) higher success rate on VWA. Overall, this stresses the need to develop new methods to calculate task success rate that more precisely reflect expert judgments." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.68, + 0.336, + 0.697 + ], + "angle": 0, + "content": "6 Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.709, + 0.825, + 0.74 + ], + "angle": 0, + "content": "In this section, we qualitatively examine failure cases of LLM judges. Following a GPT-4o judge, we focus on the common error categories to understand how LLM judges fail." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.827, + 0.888 + ], + "angle": 0, + "content": "Grounding mismatch When an agent misunderstands what is happening on the screen, its thought process may not reflect the reality of the webpage. In such cases, a judge without access to the screenshots may produce an erroneous judgment due to the agent misunderstanding what is happening on the screen. 
For example, when a user requests to show \"the product page for the item in the second row, second column,\" the Qwen2.5-VL agent ends up selecting the second item in the first row. However, it writes in its reasoning chain that \"Based on the layout of the page, the second row, second column item is the [energy Drink].\" The judge accepts the agent's thought process without realizing it opened the wrong page: \"The agent's goal was to navigate to the product page for the item in the second row, second column. The agent successfully reached the correct product page.\"" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.827, + 0.927 + ], + "angle": 0, + "content": "Misleading agent reasoning The agent may have misleading elements in its reasoning, leading the judge to reason that the agent completed the task correctly. In a multi-step" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.102, + 0.512, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.104, + 0.818, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.366 + ], + "angle": 0, + "content": "Figure 3: Example of judge error (discussed in Section 6). In this example, the user requests the agent to buy cereal in VisualWebArena, but the agent stops after adding it to the cart. The judge erroneously identified the trajectory as successful, even though the agent missed an important detail in the instruction." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.403, + 0.825, + 0.475 + ], + "angle": 0, + "content": "WorkArena++ task, the user requested the agent to apply a search filter to include a unique ID. After several unsuccessful attempts, the agent ended up stating it succeeded in its reasoning chain, even though no filter was applied. 
The judge was misled by the agent and wrote in its own reasoning chain that \"The agent successfully [...] applied the filter to extract entries...\" before incorrectly indicating that the trajectory was successful." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.482, + 0.827, + 0.596 + ], + "angle": 0, + "content": "Missed instruction details In some cases, the agent does not complete the task completely, missing crucial details from the instruction (see Figure 3). For example, when the user requests to \"buy the cheapest cereal with a graphic character on the box in the Cereals category,\" the agent finds the correct product and informs the customer: \"I've found the cheapest cereal with a graphic character on the box. It's Cocoa Puffs, 10.4 Oz Box...\" However, it missed a crucial detail: the user requested that they buy the product. Unfortunately, the judge mistakenly believes that the agent completed a purchase: \"The agent successfully identified and purchased the cheapest cereal with a graphic character on the box...\"" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.603, + 0.825, + 0.73 + ], + "angle": 0, + "content": "Misunderstanding action intents We found that the agent may misuse certain actions, leading to the trajectory to fail very close to completion. In such cases, the LLM judge may incorrectly decide that the trajectory is successful and ignore the misused action. In one instance where the goal was to answer \"What is the minimum travel time by car from Animal Rescue League of Pittsburgh to Schenley park?\", the Qwen2.5-VL agent completes all required actions, but ends up reporting the task as unfeasible instead of replying to the user. 
The GPT-4o judge (with screenshot) correctly reasons that the travel time was shown on the screen, but does not point out that reporting the task as unfeasible is incorrect, instead asserting that \"all actions were confined to the task of finding the travel time.\"" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.735, + 0.827, + 0.807 + ], + "angle": 0, + "content": "Overall, current LLM judges are limited by their capability to detect nuanced issues within trajectories, as shown by the judge missing details and misunderstanding an action. Moreover, they will easily agree with the agent's reasoning even when it is wrong, which has been previously observed in LLMs (Sharma et al., 2023). Future research should aim to address these issues to improve the performance of LLM judges for evaluating web agents." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.825, + 0.309, + 0.84 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.854, + 0.825, + 0.926 + ], + "angle": 0, + "content": "We introduce AGENTREWARDBENCH, a benchmark for evaluating LLM judges for web agent trajectories. The benchmark consists of over 1300 trajectories, each annotated by experts across three dimensions: whether the agent succeeded, whether it caused unintended side effects, and whether it repeated unnecessary actions. We evaluate 12 LLM judges on AGENTREWARDBENCH and find that simpler input representation can achieve" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.204 + ], + "angle": 0, + "content": "higher agreement with expert annotators compared to prior approaches. Moreover, we find that rule-based evaluation, often used by environment-based benchmarks, does not achieve a lower-than-expected agreement with experts. 
Instead, it tends to reject many valid trajectories, which results in the success rate of certain web agents being lower than what an expert would perceive. Overall, we believe our benchmark will help researchers design better LLM judges for web agents trajectories, which will enable the design of automatic evaluators and reward models that better reflect expert judgments." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.221, + 0.346, + 0.239 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.25, + 0.828, + 0.308 + ], + "angle": 0, + "content": "Xing Han Lu acknowledges the support of the Natural Sciences and Engineering Research Council of Canada (NSERC) [funding reference no. 579403]. The project is supported by the Google-Mila grant. We thank Alexandre Lacoste, Shikhar Murty, and the McGill NLP group members for helpful discussions." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.325, + 0.276, + 0.342 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.355, + 0.827, + 0.412 + ], + "angle": 0, + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. URL https://arxiv.org/abs/2303.08774." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.419, + 0.805, + 0.435 + ], + "angle": 0, + "content": "Adept. Act-1: Transformer for actions, 2022. URL https://www.adept.ai/blog/act-1/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.441, + 0.827, + 0.47 + ], + "angle": 0, + "content": "Ahmad Al-Dahle. The future of ai: Built with llama, December 2024. URL https://ai.meta.com/blog/future-of-ai-built-with-llama/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.477, + 0.827, + 0.506 + ], + "angle": 0, + "content": "Anthropic. 
Introducing computer use, a new claude 3.5 sonnet, and claude 3.5 haiku, 2024a. URL https://www.anthropic.com/news/3-5-models-and-computer-use." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.513, + 0.827, + 0.542 + ], + "angle": 0, + "content": "Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024b. URL https://apisemantic scholar.org/CorpusID:268232499." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.549, + 0.827, + 0.62 + ], + "angle": 0, + "content": "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report, 2025. URL https://arxiv.org/abs/2502.13923." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.627, + 0.825, + 0.684 + ], + "angle": 0, + "content": "Léo Boisvert, Megh Thakkar, Maxime Gasse, Massimo Caccia, Thibault Le Sellier De Chezelles, Quentin Cappart, Nicolas Chapados, Alexandre Lacoste, and Alexandre Drouin. Workarena++: Towards compositional planning and reasoning-based common knowledge work tasks, 2025. URL https://arxiv.org/abs/2407.05291." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.69, + 0.827, + 0.721 + ], + "angle": 0, + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym, 2016. URL https://arxiv.org/abs/1606.01540." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.727, + 0.827, + 0.783 + ], + "angle": 0, + "content": "Dongping Chen, Ruoxi Chen, Shilin Zhang, Yinuo Liu, Yaochen Wang, Huichi Zhou, Qihui Zhang, Yao Wan, Pan Zhou, and Lichao Sun. Mllm-as-a-judge: Assessing multimodal llm-as-a-judge with vision-language benchmark, 2024. URL https://arxiv.org/abs/2402.04788." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.79, + 0.827, + 0.875 + ], + "angle": 0, + "content": "Thibault Le Sellier De Chezelles, Maxime Gasse, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, Sahar Omidi Shayegan, Lawrence Keunho Jang, Xing Han Lu, Ori Yoran, Dehan Kong, Frank F. Xu, Siva Reddy, Quentin Cappart, Graham Neubig, Ruslan Salakhutdinov, Nicolas Chapados, and Alexandre Lacoste. The browsergym ecosystem for web agent research, 2025. URL https://arxiv.org/abs/2412.05467." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36:28091-28114, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.355, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.162 + ], + "angle": 0, + "content": "Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H. Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, Nicolas Chapados, and Alexandre Lacoste. Workarena: How capable are web agents at solving common knowledge work tasks?, 2024. URL https://arxiv.org/abs/2403.07718." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.171, + 0.827, + 0.215 + ], + "angle": 0, + "content": "Aarash Feizi, Sai Rajeswar, Adriana Romero-Soriano, Reihaneh Rabbany, Valentina Zantedeschi, Spandana Gella, and João Monteiro. Pairbench: Are vision-language models reliable at comparing what they see?, 2025. URL https://arxiv.org/abs/2502.15210." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.224, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint, 2024. URL https://arxiv.org/abs/2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.277, + 0.825, + 0.309 + ], + "angle": 0, + "content": "Izzeddin Gur, Ulrich Rueckert, Aleksandra Faust, and Dilek Hakkani-Tur. Learning to navigate the web. arXiv preprint arXiv:1812.09195, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.317, + 0.827, + 0.361 + ], + "angle": 0, + "content": "Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.37, + 0.827, + 0.427 + ], + "angle": 0, + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. ArXiv, abs/2401.13919, 2024. URL https://api-semanticscholar.org/CorpusID:267211622." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.438, + 0.827, + 0.495 + ], + "angle": 0, + "content": "Peter C Humphreys, David Raposo, Toby Pohlen, Gregory Thornton, Rachita Chhaparia, Alistair Muldal, Josh Abramson, Petko Georgiev, Alex Goldin, Adam Santoro, and Timothy Lillicrap. A data-driven approach for learning to control computers, 2022. URL https://arxiv.org/abs/2202.08137." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.505, + 0.827, + 0.55 + ], + "angle": 0, + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 
Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.559, + 0.827, + 0.616 + ], + "angle": 0, + "content": "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. VisualWebArena: Evaluating Multimodal Agents on Realistic Visual Web Tasks, June 2024. URL http://arxiv.org/abs/2401.13649. arXiv:2401.13649 [cs]." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.626, + 0.827, + 0.682 + ], + "angle": 0, + "content": "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, and Jie Tang. Autowebglm: A large language model-based web navigating agent, 2024. URL https://arxiv.org/abs/2404.03648." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.693, + 0.827, + 0.75 + ], + "angle": 0, + "content": "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, Noah A. Smith, and Hannaneh Hajishirzi. Rewardbench: Evaluating reward models for language modeling, 2024. URL https://arxiv.org/abs/2403.13787." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.76, + 0.827, + 0.832 + ], + "angle": 0, + "content": "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, Tianlin Shi, and Percy Liang. Reinforcement Learning on Web Interfaces using Workflow-guided Exploration. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings, 2018. URL https://openreview.net/forum?id=ryTp3f-0-." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.841, + 0.824, + 0.872 + ], + "angle": 0, + "content": "Xing Han Lu, Zdenek Kasner, and Siva Reddy. Weblinx: Real-world website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.881, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: Bootstrapping Agents by Guiding Exploration with Language, June 2024. URL http://arxiv.org/abs/2403.08140.arXiv:2403.08140 [cs]." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.147 + ], + "angle": 0, + "content": "Shikhar Murty, Hao Zhu, Dzmitry Bahdanau, and Christopher D. Manning. Nnetnav: Unsupervised learning of browser agents through environment interaction in the wild, 2025. URL https://arxiv.org/abs/2410.02907." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.155, + 0.83, + 0.201 + ], + "angle": 0, + "content": "Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.209, + 0.826, + 0.239 + ], + "angle": 0, + "content": "OpenAI. Introducing operator, January 2025. URL https://openai.com/index/introducing-operator." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.248, + 0.829, + 0.29 + ], + "angle": 0, + "content": "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents, 2024. URL https://arxiv.org/abs/2404.06474." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.3, + 0.829, + 0.344 + ], + "angle": 0, + "content": "Ajay Patel, Markus Hofmacher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. 
Large language models can self-improve at web agent tasks, 2024. URL https://arxiv.org/abs/2405.20309." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.353, + 0.829, + 0.396 + ], + "angle": 0, + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents, 2024. URL https://arxiv.org/abs/2408.07199." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.405, + 0.826, + 0.449 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.457, + 0.829, + 0.529 + ], + "angle": 0, + "content": "Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. Towards understanding sycophancy in language models, 2023. URL https://arxiv.org/abs/2310.13548." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.538, + 0.829, + 0.609 + ], + "angle": 0, + "content": "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=3PjCt4kmRx." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.618, + 0.829, + 0.662 + ], + "angle": 0, + "content": "Tianlin Shi, Andrej Karpathy, Linxi Fan, Jonathan Hernandez, and Percy Liang. 
World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, pp. 3135-3144. PMLR, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.671, + 0.83, + 0.728 + ], + "angle": 0, + "content": "Robert St. Amant and Luke S. Zettlemoyer. The user interface as an agent environment. In Proceedings of the Fourth International Conference on Autonomous Agents, AGENTS '00, pp. 483-490, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581132301. doi: 10.1145/336595.337575. URL https://doi.org/10.1145/336595.337575." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.737, + 0.829, + 0.78 + ], + "angle": 0, + "content": "Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, and Ruslan Salakhutdinov. Towards internet-scale training for agents, 2025. URL https://arxiv.org/abs/2502.06776." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.79, + 0.829, + 0.833 + ], + "angle": 0, + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. WebShop: Towards Scalable Real-world Web Interaction with Grounded Language Agents. In NeurIPS, 2022. URL https://arxiv.org/abs/2207.01206." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.843, + 0.829, + 0.886 + ], + "angle": 0, + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks?, 2024. URL https://arxiv.org/abs/2407.15711." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.895, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(ision) is a generalist web agent, if grounded, 2024. URL https://arxiv.org/abs/2401.01614." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.83, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.161 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena, 2023. URL https://arxiv.org/abs/2306.05685." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.169, + 0.826, + 0.226 + ], + "angle": 0, + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. WebArena: A Realistic Web Environment for Building Autonomous Agents, April 2024. URL http://arxiv.org/abs/2307.13854. arXiv:2307.13854 [cs]." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.234, + 0.826, + 0.292 + ], + "angle": 0, + "content": "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, Yangyang Shi, Vikas Chandra, and Jürgen Schmidhuber. Agent-as-a-judge: Evaluate agents with agents, 2024. URL https://arxiv.org/abs/2410.10934." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.315, + 0.119 + ], + "angle": 0, + "content": "A Benchmark" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.133, + 0.504, + 0.149 + ], + "angle": 0, + "content": "A.1 Environment and Experiments Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.158, + 0.825, + 0.243 + ], + "angle": 0, + "content": "AssistantBench Although an unlimited number of websites can be visited, we observed that the agents visited a total of 66 unique domains between 1 and 129 times across all trajectories we collected. The number of times a domain was visited can be found in Table 4. Additionally, we replace the default search engine with an alternative search engine (https://duckduckgo.com) as the original homepage blocks browser automation, which renders the tasks unachievable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.251, + 0.827, + 0.35 + ], + "angle": 0, + "content": "Tasks Subgroups We define the subgroup for WebArena and VisualWebArena as the combination of web domain and evaluation method from the original works. The evaluation methods consist of string matching, HTML-based programs, webpage image querying, and final URL matching. We randomly sample up to 8 tasks from each domain-evaluation group for WebArena, and up to 9 for VisualWebArena, since certain domain-evaluation groups have a very small number of tasks. For WorkArena, we attempt to evenly distribute the task categories. 
As a result, we have the following task distributions:" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.361, + 0.825, + 0.389 + ], + "angle": 0, + "content": "- WebArena: Wikipedia (8), Map (18), Reddit (18), Shopping Admin (18), Shopping (19), Gitlab (19)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.393, + 0.786, + 0.409 + ], + "angle": 0, + "content": "- VisualWebArena: Wikipedia (17), Reddit (27), Classifieds (28), Shopping (28)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.411, + 0.825, + 0.455 + ], + "angle": 0, + "content": "- WorkArena: Sophisticated memory (15), Information retrieval (20), Contextual understanding infeasible tasks (21), Planning and problem solving (22), Data driven decision making and reasoning (22)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.361, + 0.825, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.466, + 0.825, + 0.496 + ], + "angle": 0, + "content": "Agent Hyperparameters The binary flags used in AgentLab (Chezelles et al., 2025) are shown in Table 5. We set a maximum limit of 40K input tokens and 8192 output tokens." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.827, + 0.63 + ], + "angle": 0, + "content": "Agent Platform Implementation In addition to abstracting websites and browser engines into Gym-compatible environments (Brockman et al., 2016), BrowserGym (Drouin et al., 2024; Chezelles et al., 2025) offers advanced preprocessing of complex web inputs (i.e., DOM and accessibility trees) and can automatically parse LLM output and execute them as browser actions like clicks, form inputs, tab actions, etc. Additionally, the BrowserGym ecosystem includes AgentLab, a framework for processing input representation and managing web agent experiments. We use AgentLab to design our representation function \\( R \\), ensuring unified hyperparameters and inputs. 
As a result, we can avoid unintended differences that may arise from customizing prompts and representations for each LLM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.644, + 0.312, + 0.658 + ], + "angle": 0, + "content": "A.2 Annotations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.668, + 0.827, + 0.797 + ], + "angle": 0, + "content": "Trajectory filtering In total, 351 tasks were considered across 5 benchmarks (33 in AssistantBench, 100 in VisualWebArena, 100 in WebArena, 18 in WorkArena, and 100 in WorkArena++) We collect trajectories from agents built from each of three multimodal models: Claude 3.7 Sonnet, GPT-4o, Qwen2.5-VL. Moreover, since Llama 3.3 is not multimodal, we only collect trajectories on 251 tasks (excluding VisualWebArena). Additionally, Llama 3.3 did not complete two WebArena tasks (nos. 735 and 805) due to timeout issues that consistently occurred in the environment, despite multiple attempts to restart. Thus, we obtain a total of 1302 trajectories, where 196 are stored in the development split and 1106 in the test split." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.803, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Interface To annotate the trajectories, we designed a fully customized annotation interface using Gradio (see Figure 4). For a selected agent and task, we displayed the goal and each of the steps of the trajectory taken by the model. It shows the model's reasoning and action, as well as a screenshot with the action element on overlay. Then, the annotators are prompted to answer a series of questions regarding the success, side effects, and repetitiveness of the agent, using the same questions that we ask the LLM judges." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Shared Knowledge Given that the annotators are experts, it is possible that the annotators share knowledge of web agents that non-expert may not possess; we identify several shared" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.273 + ], + "angle": 0, + "content": "knowledge facts. (1) web agent design and capabilities: the annotators are aware that the agents are designed with LLMs, some of which have multimodal capabilities, and that they are capable of generating reasoning traces to support actions, and that the LLMs may be subject to hallucination or may product repetitive sequences of text. (2) dedicated web environments: the annotators know that several the websites used in the project come from prior publications in the domain, including WebArena (Zhou et al., 2024), VisualWebArena (Koh et al., 2024), WorkArena (Drouin et al., 2024; Boisvert et al., 2025). They are aware that some of the websites are designed specific for the task, whereas others come from real-world websites. (3) Automatic Evaluation: the annotators know that the web environments employ automatic evaluation methods, such as string matching and URL matching, to evaluate the agents. Thus, a task that is successful or unsuccessful may terminate earlier, but the agent will not be guaranteed to receive a positive reward for that task." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.28, + 0.828, + 0.462 + ], + "angle": 0, + "content": "Annotator agreements and disagreements resolution For most tasks, binary annotations can be produced. However, in some cases, the annotator may not be certain of their annotation, and are allowed to mark a trajectory as uncertain, which was subsequently reviewed by the other annotators. 
In some cases, annotators may disagree with their judgments. In general, a salient reason for mismatch is the ambiguity of the instructions. For example, a task instruction might mention \"buy a black t-shirt\", but may not specify if it is fully black or can have other graphics. In such cases, annotators are advised to go for the most lenient option. More generally, to ensure that annotators can easily voice their uncertainty and disagreement, the first half of the annotation was conducted in person with all annotators present concurrently. Thus, when an annotator was uncertain about the annotation for a trajectory, they can ask other annotators, who can deliberate about the correct annotation until a consensus is reached. This approach further allows other annotators to align to the consensus for the remaining annotations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.475, + 0.31, + 0.491 + ], + "angle": 0, + "content": "A.3 LLM Judges" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.501, + 0.827, + 0.545 + ], + "angle": 0, + "content": "**Prompts** We use simple system prompt (Figure 5) and user message (Figure 6) templates without model-specific commands, allowing our prompt to be transferred to any LLM. We use distinct tags, such as and , to facilitate parsing the model output." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.827, + 0.595 + ], + "angle": 0, + "content": "Results We report extended results for 10 LLM judges, with the overall results in Table 7 and the finegrained results in Table 8 over all agents; the unaggregated results are presented in Tables 9 to 12." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.111, + 0.825, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.356, + 0.508, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.356, + 0.825, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.593, + 0.747, + 0.61 + ], + "angle": 0, + "content": "Figure 4: User Interface used by annotators for answering questions" + }, + { + "type": "table", + "bbox": [ + 0.198, + 0.638, + 0.8, + 0.884 + ], + "angle": 0, + "content": "
Domain#Domain#Domain#
duckduckgo.com129google.com112wizards.com24
blackbaudhosting.com21fedex.com17mtggoldfish.com17
usps.com12fidelity.ca12weather.gov10
yelp.com9linkedin.com9rottentomatoes.com9
nih.gov8tcgplayer.com8imdb.com8
yahoo.com7cagreatamerica.com7thedrinknation.com6
tripadvisor.com6express.dhl6californiagreatamerica.com5
seattlechildrensmuseum.org5monday.com5fubo.tv5
philamuseum.org5weatherspark.com5bing.com5
ensembl.org4wellhub.com4hubbioo.com3
wholefoodsmarket.com3alltrails.com3target.com2
andersmartialarts.com2wikipedia.org2sfyimby.com2
currentresults.com2stockanalysis.com2speakrj.com2
x.com2apple.com2extremeweatherwatch.com2
tmplclubs.com2sixflags.com1etf.com1
amazon.com1netflixreleases.com1weather-and-climate.com1
wunderground.com1redfin.com1talesofamountainmama.com1
themeparkcenter.com1seattleweatherblog.com1chromewebdata1
peacefoodnyc.com1sec.gov1calicolabs.com1
easyship.com1onlineshippingcalculator.com1tripadvisor.ca1
nyunews.com1fandango.com1aimobo.io1
anytots.com1morningstar.com1visitphilly.com1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.348, + 0.897, + 0.651, + 0.91 + ], + "angle": 0, + "content": "Table 4: AssistantBench Website Visit Counts" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.199, + 0.113, + 0.8, + 0.207 + ], + "angle": 0, + "content": "
ValueFlags
Trueuseifax, use_som, use-thinking, use_concrete_example, use_ABSTRACT_example, use_hints, be_cautious
Falseuse_html, usepast_error_logs, use Think_history, use_diff, filter Visible_elements_only, long_description, individual/examples, use_plan, use_criticise, use_memory, enable chatting
" + }, + { + "type": "table_caption", + "bbox": [ + 0.368, + 0.216, + 0.63, + 0.233 + ], + "angle": 0, + "content": "Table 5: Agentlab Hyperparameters" + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.261, + 0.773, + 0.611 + ], + "angle": 0, + "content": "
BenchmarkAgentExpertLLM JudgeRule-based
AssistantBenchClaude 3.7 S.11.111.10.8
GPT-4o14.814.83.7
Llama 3.33.77.45.3
Qwen2.5-VL0.00.02.2
WebArenaClaude 3.7 S.55.164.130.8
GPT-4o42.350.025.6
Llama 3.322.427.618.4
Qwen2.5-VL33.352.629.5
VisualWebArenaClaude 3.7 S.28.334.823.9
GPT-4o35.947.817.4
Qwen2.5-VL21.734.817.4
WorkArenaClaude 3.7 S.68.868.850.0
GPT-4o50.056.250.0
Llama 3.356.250.056.2
Qwen2.5-VL56.256.256.2
WorkArena++Claude 3.7 S.18.420.78.1
GPT-4o18.411.54.6
Llama 3.39.25.83.5
Qwen2.5-VL13.814.911.5
OverallClaude 3.7 S.33.038.020.4
GPT-4o31.335.316.3
Llama 3.317.017.513.3
Qwen2.5-VL22.331.719.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.622, + 0.825, + 0.637 + ], + "angle": 0, + "content": "Table 6: Success Rate by evaluation type. For the LLM judge, we use GPT-4o with accessibility trees." + }, + { + "type": "table", + "bbox": [ + 0.198, + 0.666, + 0.803, + 0.872 + ], + "angle": 0, + "content": "
JudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AER-C67.771.969.7------
AER-V67.671.569.5------
Claude 3.7 S. (A)68.881.674.714.034.720.082.894.988.4
Claude 3.7 S. (S)69.476.372.714.144.421.482.094.587.8
Functional83.855.967.1------
GPT-4o (A)69.883.175.97.791.714.280.496.987.9
GPT-4o (S)68.180.373.77.590.313.879.296.286.9
GPT-4o Mini (A)61.586.171.77.270.813.078.646.458.3
GPT-4o Mini (S)64.578.370.86.631.911.092.318.530.8
Llama 3.3 (A)67.779.072.96.979.212.780.191.685.5
NNetNav52.582.464.1------
Qwen2.5-VL (A)64.389.875.09.055.615.488.172.679.6
Qwen2.5-VL (S)64.586.173.78.858.315.288.764.674.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.911 + ], + "angle": 0, + "content": "Table 7: Results over all benchmarks by judge. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.189, + 0.129, + 0.81, + 0.844 + ], + "angle": 0, + "content": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.868, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Table 8: Finegrained results by benchmark and judge for all agents. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.189, + 0.128, + 0.812, + 0.847 + ], + "angle": 0, + "content": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.868, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Table 9: Finegrained results by benchmark and judge for Qwen2.5-VL agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.189, + 0.129, + 0.81, + 0.844 + ], + "angle": 0, + "content": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.868, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Table 10: Finegrained results by benchmark and judge for Llama 3.3 agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.189, + 0.128, + 0.812, + 0.847 + ], + "angle": 0, + "content": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.867, + 0.825, + 0.896 + ], + "angle": 0, + "content": "Table 11: Finegrained results by benchmark and judge for Claude 3.7 Sonnet agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.189, + 0.129, + 0.81, + 0.844 + ], + "angle": 0, + "content": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.867, + 0.825, + 0.895 + ], + "angle": 0, + "content": "Table 12: Finegrained results by benchmark and judge for GPT-4o agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.199, + 0.11, + 0.367, + 0.125 + ], + "angle": 0, + "content": "System Prompt Template" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.137, + 0.798, + 0.214 + ], + "angle": 0, + "content": "You are a Language Model specialized in judging the performance of web agents in web navigation tasks. For a certain website, you are given the goal of a navigation task, the current URL of the webpage, the actions taken by the agent, and the thought process of the agent. Additionally, you will have access to the first and last screenshots, as well as the last accessibility tree. Your task is to answer several questions about the agent's performance in the task." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.225, + 0.782, + 0.251 + ], + "angle": 0, + "content": "Question 1: Was the sequence of actions below successful in achieving the goal? \nChoices: Successful, Unsuccessful" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.263, + 0.798, + 0.301 + ], + "angle": 0, + "content": "Question 2: Did the agent perform unnecessary actions that could lead to unintended side effects? \nChoices: Yes, No" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.313, + 0.798, + 0.34 + ], + "angle": 0, + "content": "Question 3: Did the agent perform the task optimally, by only performing necessary actions and avoiding unnecessary ones?" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.351, + 0.796, + 0.377 + ], + "angle": 0, + "content": "Choices: 1. Complete Failure, 2. Suboptimal, 3. Somewhat Optimal, 4. 
Completely Optimal" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.389, + 0.775, + 0.427 + ], + "angle": 0, + "content": "Question 4: Did the agent loop through a sequence of actions that did not make progress towards the goal? \nChoices: Yes, No" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.439, + 0.796, + 0.477 + ], + "angle": 0, + "content": "You should provide your reasoning process for each question, and you can refer to the screenshots and the accessibility tree to support your answers. Your answer must follow the following format:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.489, + 0.51, + 0.553 + ], + "angle": 0, + "content": "your reasoning here answer answer answer answer" + }, + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.581, + 0.713, + 0.598 + ], + "angle": 0, + "content": "Figure 5: System Prompt Template used for the simplified judge" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.617, + 0.367, + 0.634 + ], + "angle": 0, + "content": "User Prompt Template" + }, + { + "type": "code", + "bbox": [ + 0.198, + 0.645, + 0.8, + 0.874 + ], + "angle": 0, + "content": "The user goal is: {goal} \nThe agent performed the following actions: \nStep: {step_number} \nURL: {url} \nAction: {action} \nReasoning: {reasoning} \n----- \nThe last accessibility tree is: \n{axtree} \nHere is the screenshot of the last step. \n{screenshot} \nProvide your reasoning and answer the four questions from the system prompt, using \nthe specified format." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.293, + 0.903, + 0.703, + 0.919 + ], + "angle": 0, + "content": "Figure 6: User Prompt Template used for the simplified judge" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_origin.pdf b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ff0a75a7b538abd46114ec8623cad11d9681fc6b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/eb06eaaa-dd2d-4310-8824-a39ee9e3457c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30da2dc71e23fc57e9c2a8e0aae0d006e317eb9da8bd597549ac1091f89f2c09 +size 1949993 diff --git a/data/2025/2504_08xxx/2504.08942/full.md b/data/2025/2504_08xxx/2504.08942/full.md new file mode 100644 index 0000000000000000000000000000000000000000..461719352dc3d95a9caf6d6af0b2ac6d671a48c4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/full.md @@ -0,0 +1,341 @@ +# AGENTREWARDBENCH: Evaluating Automatic Evaluations of Web Agent Trajectories + +Xing Han Lu12 Amirhossein Kazemnejad*2 + +Nicholas Meade12 Arkil Patel12 Dongchan Shin2 Alejandra Zambrano2 + +Karolina Stanczak12 Peter Shaw4 Christopher J. Pal2567 Siva Reddy1257 + +*Core contributor ¹McGill University ²Mila Quebec AI Institute ⁴Google DeepMind + +5Canada CIFAR AI Chair 6Polytechnique Montréal 7ServiceNow Research + +xing.han.lu@mail.mcgill.ca; siva.reddy@mila.quebec + +# Abstract + +Web agents enable users to perform tasks on web browsers through natural language interaction. Evaluating web agents trajectories is an important problem, since it helps us determine whether the agent successfully completed the tasks. 
Rule-based methods are widely used for this purpose, but they are challenging to extend to new tasks and may not always recognize successful trajectories. We may achieve higher accuracy through human evaluation, but the process would be substantially slower and more expensive. Automatic evaluations with LLMs may avoid the challenges of designing new rules and manually annotating trajectories, enabling faster and cost-effective evaluation. However, it is unclear how effective they are at evaluating web agents. To this end, we propose AGENTREWARD-BENCH, the first benchmark to assess the effectiveness of LLM judges for evaluating web agents. AGENTREWARD-BENCH contains 1302 trajectories across 5 benchmarks and 4 LLMs. Each trajectory in AGENTREWARD-BENCH is reviewed by an expert, who answers questions pertaining to the success, side effects, and repetitiveness of the agent. Using our benchmark, we evaluate 12 LLM judges and find that no single LLM excels across all benchmarks. We also find that the rule-based evaluation used by common benchmarks tends to underreport the success rate of web agents, highlighting a key weakness of rule-based evaluation and the need to develop more flexible automatic evaluations. We release the benchmark at: https://agent-reward-bench.github.io + +# 1 Introduction + +Giving a Large Language Model (LLM) access to a web browser unlocks an entirely new capability paradigm: beyond interacting with a user through a chat interface, such models can interact with the online world to complete tasks similar to how a human would. The promise of a new paradigm has motivated the design of LLMs to control interfaces such as web browsers, starting from earlier foundation models such as ACT-1 (Adept, 2022) to the more recent OpenAI Operator (OpenAI, 2025) and Claude Computer use (Anthropic, 2024a), showing promising results in real-world tasks (Zhou et al., 2024). 
+ +To measure the progress of web agents, a well-designed benchmark should compile a collection of realistic tasks across diverse websites. As illustrated in Figure 1, a user may ask the agent to locate a Classifieds listing for a Google Pixel phone and submit an offer via a comment. Inside a dedicated environment (e.g., a self-hosted Classifieds site), the web agent would complete the task by filling the search bar, identifying the correct listing, and writing a comment to show interest in purchasing the item. To determine if the agent successfully completed the request, we need to automatically evaluate the agent's chosen actions – known as trajectories – using a set of rules uniquely designed for the task of finding a Pixel phone on Classifieds. As expected, rule-based evaluation is time-consuming for + +![](images/44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg) +Figure 1: Example from AGENTREWARDBENCH, where an LLM judge evaluates a web agent's trajectory on VisualWebArena (Koh et al., 2024). The benchmark compares judgments against expert annotations to determine the effectiveness of the judge for evaluating web agents. + +experts to design, and may not cover every successful scenario (e.g., what if the agent finds a different but valid listing?). It is also possible for an expert to annotate the trajectories, but it would be slow and expensive to scale across many web agents. This brings us to the following questions: Given a web agent trajectory, can an LLM decide if it is successful? If so, how do we determine which LLM is the most capable at evaluating web agents? + +Past works have shown that LLMs can be used as judges to evaluate the output of LLM chatbots (Zheng et al., 2023). More recently, LLM judges have been used for automatically evaluating trajectories from web agents (Pan et al., 2024; Murty et al., 2025; Trabucco et al., 2025). 
With highly accurate automatic evaluation methods, we can measure the progress of web agents on new sets of tasks, use them to synthesize trajectories for finetuning smaller models, and design reward models that can be used in a reinforcement learning (RL) setting. However, it remains unclear whether current automatic evaluators, whether rule-based or LLM-based, can predict the success of a trajectory in a way that reflects expert judgment. + +To address this problem, we introduce AGENTREWARDBENCH (§3), a benchmark for determining the capability of an LLM at evaluating web agents (see Figure 1). It consists of 1300 trajectories produced by 4 popular LLM agents on 5 diverse web environments, ranging from common tasks like online shopping and posting on a forum, to highly specialized requests in professional environments, such as updating task schedules on IT task management platforms. Each trajectory is labeled by expert annotators to determine whether the agent successfully completed the task, caused unintended side effects, or entered cycles of repetitive actions. Using this benchmark, we evaluate both existing and novel LLM judges (§4) alongside rule-based evaluation. We find that rule-based methods, which are used as the official automatic evaluation by environment-based benchmarks, severely underestimate the capabilities of agents and do not reflect how experts define success (§5). We further provide an in-depth analysis (§6) that highlights the weaknesses of existing LLMs when used as judges. Overall, we believe AGENTREWARDBENCH can be used to enable better automatic evaluation and reward modeling for web agents. + +# 2 Related Works + +Web Agents and Environments Designing agents that can automatically navigate user interfaces has been a long standing problem; earlier approaches employed program-based heuristics (St. 
Amant & Zettlemoyer, 2000), whereas later works on web navigation focus on training reinforcement learning (RL) models (Gur et al., 2018; Humphreys et al., 2022), + +language models (Nakano et al., 2021; Gur et al., 2023; Deng et al., 2023) and multimodal models (Shaw et al., 2023; Lu et al., 2024; Zheng et al., 2024). To measure the advancements in web agents, various benchmarks have been proposed, with initial works proposing simplified environments (Shi et al., 2017; Liu et al., 2018) and subsequent iterations focusing on specific tasks like web shopping (Yao et al., 2022). More recent benchmarks focus on designing realistic environments that cover commonly used websites (Zhou et al., 2024; Koh et al., 2024) as well as specialized environments (Drouin et al., 2024; Boisvert et al., 2025). + +LLM Judges Zheng et al. (2023) proposed using LLMs to predict human preferences of dialogue completion for chat models. They show that a GPT-4-based judge achieves over $80\%$ agreement with human votes on the task of selecting better completions between models pairs. Follow-up extends this framework to new modalities (Chen et al., 2024), metrics (Feizi et al., 2025) and coding agents (Zhuge et al., 2024); the latter, Agent-as-a-Judge, leverages intermediate feedback from the environment. He et al. (2024) extend the idea by using LLMs to judge trajectories from web agents, allowing them to determine task completion without human annotators, resulting in a high correlation with humans on a private subset of trajectories. To determine the quality of automatic judgments, Pan et al. (2024) evaluate four LLM judges using trajectories from a GPT-4 agent on WebArena tasks, and find that the best judge achieves $80.6\%$ accuracy against the rule-based evaluator from WebArena. Unlike prior works on LLM judges, we design AGENTREWARDBENCH with trajectories from several LLM agents on diverse web benchmarks, where each one is annotated by human experts on multiple dimensions. 
By following a human-focused approach similar to Lambert et al. (2024), we ensure that LLM judges are evaluated against expert preferences on a wide range of scenarios. + +Trajectory Synthesis Leveraging web environments that can be created and reset without real-world impact, recent works started to explore generating trajectories without human supervision. Leveraging LLM judges and LLM-generated tasks, trajectory synthesis can be used to bootstrap agent-judge training loops (Murty et al., 2024; 2025), to create contrastive pairs (Putta et al., 2024) for direct preference optimization (Rafailov et al., 2023), or as training data to finetune a base model (Lai et al., 2024; Patel et al., 2024; Trabucco et al., 2025). Although all the methods leverage an LLM judge, they lack a clear way of directly determining the quality of judgments, instead relying on the downstream performance improvement to validate their approach. To this end, AGENTREWARDBENCH enables researchers to choose the most appropriate LLM judge for a category of web tasks based on their effectiveness at evaluating web agents. + +# 3 AGENTREWARDBENCH + +In this work, we introduce AGENTREWARDBENCH, a benchmark designed to assess the capabilities of LLM judges for evaluating web agents (§3.1). We curate 5 diverse web environments and tasks (§3.2) in order to collect trajectories from web agents based on 4 LLMs (§3.3). For each trajectory, a team of expert annotators carefully reviews the screenshots, actions, and the agent's reasoning chains before labeling them as either successful or unsuccessful, alongside other auxiliary labels (see Figure 2). Finally, we evaluate LLM judges (Table 1) by comparing their predictions with expert annotations to determine their effectiveness for automatic evaluation. 
+ +# 3.1 Assessment Framework + +Trajectory Definition Let $o_i$ be an observation of a browser at time step $i$ , $a_i$ be an action that can be executed on a webpage through a browser navigation engine $B$ such that $o_{i+1} = B(o_i, a_i)$ , and $r_i$ be the reasoning for choosing the action. We define a web agent trajectory as the sequence $\mathcal{T} = \{o_1, (r_1, a_1), o_2, (r_2, a_2), \ldots, o_{n-1}, (r_{n-1}, a_{n-1}), o_n\}$ where $o_n$ is the final observation in the trajectory. Each observation contains a screenshot of the browser $s_i$ , the Document Object Model (DOM) tree representation of the browser, and an accessibility (A11Y) tree rendered from the DOM tree. For the observation to be useful for an LLM agent, we need a representation function $R$ that produces $p_i = R(o_1, r_1, a_1, \ldots, o_i)$ , which can be used as an input for an LLM. If the agent is multimodal, $o_i$ would include + +![](images/fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg) +Figure 2: AGENTREWARDBENCH creation process. We first collect trajectories from LLM agents inside web environments using instructions from several benchmarks. Then, the trajectories are reviewed by expert annotators, who indicate if the trajectory is successful, led to side effects, and contains repetition cycles. Finally, we use the annotated trajectories to evaluate LLM judges. + +screenshots; otherwise, it would be a textual representation of the page (e.g., accessibility tree). Then, $p_i$ is given to a language model to produce a completion $c_i = \mathrm{LM}(p_i)$ , or $c_i = \mathrm{VLM}(p_i, s_i)$ in the case of a multimodal LLM. The completion is parsed by an execution function $E$ to produce $(a_i, r_i) = E(c_i)$ . + +Annotation Design For each trajectory, an expert annotator reviews a goal $g$ and sequence $\{s_1, (r_1, a_1), \ldots, s_{n-1}, (r_{n-1}, a_{n-1}), s_n\}$ in order to answer questions $\mathcal{Q} = \{q_1, \ldots, q_m\}$ . 
We consider the answers produced, $\mathcal{A}^* = \{a_1^*, \ldots, a_m^*\}$ , as the ground truth annotations for the trajectory, which indicate whether the agent successfully completed $g$ . To collect $\mathcal{A}^*$ , we use the following $\mathcal{Q}$ in the annotation guidelines: + +1. Success: Was the sequence of actions successful in achieving the goal? +2. Side Effect: Did the agent perform unnecessary actions that could lead to unintended side effects? +3. Repetition Cycle: Did the agent loop through a sequence of actions that did not make progress towards the goal? + +Agreement with respect to success is the primary criterion with which we evaluate LLM judges. The remaining can be useful as auxiliary criteria for detecting issues ahead of time. For example, if an agent purchases several irrelevant products when the user only requested one, then the trajectory would be flagged for side effects, independent of task success. A judge can also indicate the presence of a cycle, for example, if the agent repeatedly clicks on a disabled button. Both signals can be used to penalize the agent during training or steer it to another action at inference. + +Annotation Setup The team of annotators consisted of 6 experts with a deep understanding of the tasks and environments through their research on web agents. They used a custom-built user interface that displays each trajectory with screenshots, actions, and reasoning. Rating disagreements were resolved by annotators discussing among themselves until clear annotations can be produced for ambiguous trajectories. Moreover, the annotators also have access to the environment and accessibility trees when screenshots are insufficient. + +Judge Model Given a goal $g$ , trajectory $\mathcal{T}$ and questions $\mathcal{Q}$ , a judge model returns a judgment $\hat{\mathcal{A}}$ , which is an estimate of $\mathcal{A}^*$ . 
We can use $\hat{\mathcal{A}}$ to derive a reward in RL or to automatically evaluate web agents when $\mathcal{A}^*$ is unavailable. To implement the judge, we need a judge-specific function $R_j$ that produces a representation of the trajectory, $p = R_j(o_1, r_1, a_1, \ldots, o_n)$ . $R_j$ can vary substantially, ranging from a simple list of actions $a_1, \ldots, a_{n-1}$ , to using another LLM to process the observation history. We describe judges used in previous works and introduce a simplified judge in Section 4 and provide supplementary details in Section A.3.

# 3.2 Tasks and Environments

We select 5 benchmarks designed to evaluate web agents inside dedicated environments and real websites, including general-purpose (Zhou et al., 2024), vision-focused (Koh et al., 2024), real-world (Yoran et al., 2024), and enterprise-oriented (Drouin et al., 2024; Boisvert et al., 2025) tasks. In total, we curate 351 unique tasks across 8 environments and 66 websites, which we separate into 51 development and 300 test tasks (details in Section A.1).

WebArena (WA; Zhou et al. 2024) This benchmark comprises 6 self-hosted websites covering a wide range of domains: customer relationship management, map navigation, online encyclopedia, shopping site, social forum, and software development collaboration platform. Each environment is derived from real open-source projects that develop self-hosted environments for both commercial and personal usage. Each task consists of a textual goal that requires a good understanding of one or multiple environments to complete.

VisualWebArena (VWA; Koh et al. 2024) To complement WebArena's text-based goals, we also include VisualWebArena (VWA), a benchmark focusing on tasks that require visual reasoning to complete.
For instance, a user may include an image alongside the goal, or the task could be designed to only be solved if the agent selects an item with a unique visual characteristic (e.g., purchasing a TV with the widest bezel). VWA also introduces a new online marketplace environment (Classifieds). + +AssistantBench (AB; Yoran et al. 2024) In addition to the self-hosted environments, we consider trajectories resulting from agent execution on real-world websites. This benchmark defines tasks that require navigating the internet, starting from a search engine. Since the test set is private, we use the validation set, which consists of 33 unique tasks. + +# WorkArena (Work; Drouin et al. 2024) and WorkArena++ (Wk++; Boisvert et al. 2025) + +To increase the diversity of tasks relevant to professional environments, we incorporate WorkArena (Boisvert et al., 2025), a benchmark of 18 basic tasks on ServiceNow, a software-as-a-service platform for professional workflows in the information technology (IT), human resources, and customer management domains. WorkArena++ introduces tasks with greater complexity, requiring planning and reasoning to correctly complete multiple sub-tasks. Including this alongside WorkArena allows us to evaluate judges on a wider range of task difficulty. We focus on the Level 2 tasks since Level 3 is too challenging for current agents. + +# 3.3 Web Agents Design + +To collect trajectories on the 5 benchmarks, we design web agents using two models from major commercial providers and two open-weight LLMs. + +LLM backbones On the commercial side, we use OpenAI's $GPT-4o^2$ (Hurst et al., 2024) and Anthropic's Claude 3.7 Sonnet (Anthropic, 2024b). They are the flagship models of their respective providers, both of which offer computer-use agents powered by their LLMs, namely OpenAI Operator (OpenAI, 2025) and Anthropic Claude's Computer use (Anthropic, 2024a). 
We select two leading open-weights LLMs to complement the commercial LLMs: Llama-3.3-70B (Grattafori et al., 2024) and Qwen2.5-VL (Bai et al., 2025). In both cases, we choose the instruction-tuned variant, which have undergone post-training for tool-use or UI navigation. Moreover, since Llama-3.3 is a text-only model, it was excluded from VisualWebArena, which requires image-based reasoning. + +Agent Platform By default, each LLM backbone receives an input processed by a representation function $R$ and generates a completion $c_{i}$ . Then, $c_{i}$ is interpreted as an action by an execution function $E$ . To implement $E$ , we use AgentLab and BrowserGym (Chezelles et al., 2025), an ecosystem for designing web agents using LLMs (details in Section A.1). + +Trajectory Annotations and Splits We collect a total of 1302 trajectories from our 4 LLM-based web agents across five benchmarks. Based on the task split (§3.2), 196 trajectories are + +
CategoryJudgeOverallABVWAWA PrecisionWorkWk++
PrecisionRecallF1
OfficialRule-based*83.855.967.125.085.279.0100.083.3
ExistingAER-C67.771.969.783.356.068.8100.066.7
AER-V67.671.569.583.361.267.696.459.3
NNetNav52.582.464.120.854.554.377.343.2
Ours (A)Claude 3.7 S.68.881.674.787.561.069.385.066.7
GPT-4o69.883.175.977.863.070.294.663.0
GPT-4o Mini61.586.171.780.057.963.584.249.4
Llama 3.367.779.072.975.059.668.294.362.7
Qwen2.5-VL64.389.875.072.759.363.687.260.3
Ours (S)Claude 3.7 S.69.476.372.771.464.869.385.366.7
GPT-4o68.180.373.777.860.769.993.859.6
GPT-4o Mini64.578.370.880.057.466.990.354.8
Qwen2.5-VL64.586.173.770.058.562.993.864.4
+ +Table 1: Judge performance for predicting success, measured with precision (§4.2). We report recall and F1 as auxiliary scores. We examine two variants of the simplified judge: one with the final accessibility tree (A), and the other with the final screenshot (S). *Rule-based evaluation are included for reference. + +in the development split and 1106 are in the test split (details in A.2). The annotators follow the process described in Section 3.1 to label all trajectories, producing a total of 3906 binary annotations. To assess agreement between annotators, we annotated the GPT-4o agent's trajectory on WebArena with a second annotator. We obtained an inter-annotator agreement of $89.3\%$ on success, indicating a high level of consistency among annotators. + +# 4 LLM judges for web tasks + +# 4.1 Judge implementations + +We consider two existing implementations of LLM judges for web agents, Agent Eval Refine (AER; Pan et al. 2024) and NNetNav (Murty et al., 2025), and introduce a simplified judge that simultaneously predicts success, side effects, and repetition. In Agent-as-a-Judge (Zhuge et al., 2024), the method assumes the judge can interact with the environment after the agent finishes executing actions, which isn't feasible when the environment state cannot be preserved or shareable across agents. Other LLM judge variants were proposed (He et al., 2024; Putta et al., 2024; Lai et al., 2024; Trabucco et al., 2025), but our three judge implementations cover major strategies for representing trajectories. + +AER (Pan et al., 2024) The judge in this framework takes as input the sequence of agent thoughts and actions alongside the final browser state, which is either passed to a vision-enabled model as a screenshot (AER-V) or as a caption generated by a captioner model (AER-C). Then, the judge outputs its reasoning before predicting success or failure. 
For both the judge and captioner, we implement this method using GPT-4o, which is an overall stronger model than the GPT-4 (Achiam et al., 2023) model originally used.

NNetNav (Murty et al., 2025) In this work, a Llama 3.1 70B judge receives a summary of changes across all observations and has to give a rating between 1 (worst) and 5 (best) after providing the thought process; the rating is binarized by thresholding at 4, based on the original implementation. To generate summaries, an LLM is used to describe the change between two observations based on the accessibility trees instead of screenshots. We use Llama 3.3 70B (Al-Dahle, 2024), an improved version of the original backbone.

Simplified judge (ours) We propose a simplified design for our judge. First, it directly answers the three questions asked to the annotators. This allows it to return multiple labels within a single completion. Then, we decouple the system prompt and reasoning chain from the final state representation, allowing the judge to receive either the accessibility tree or the
A11YScreenSuccessSide EffectRepetition
PRF1PRF1PRF1
62.181.770.66.531.910.892.516.828.4
X61.586.171.77.270.813.078.646.458.3
X64.578.370.86.631.911.092.318.530.8
XX60.773.966.77.276.413.278.159.167.3
+ 

Table 2: Ablation study of our GPT-4o mini judge, measured in precision (P), recall (R), and F1. We consider how including accessibility trees and screenshots in the input affects the predictions.

screenshot. This differs from AER, which requires a vision-enabled model, and NNetNav, which requires a long-context model capable of receiving multiple accessibility trees. Our method is compatible with both multimodal and text-only LLMs and does not require a separate LLM to caption the screenshot or summarize changes across observations.

# 4.2 Evaluation

To evaluate LLM judges, we use the precision score, which is the ratio of true positives over all predicted positives (true + false positives). The metric is a good fit for rejection finetuning (RFT), where we are interested in increasing the number of true positives (actual successful trajectories) while reducing the number of false positives (failed trajectories added to the dataset due to poor LLM judgments). For reward modeling, we also want to prioritize true positives since they are the primary signals for many RL algorithms, while false positives should be minimized to avoid introducing noise to the loss function. Moreover, recall and F1 benefit from minimizing false negatives, which is useful for improving sample efficiency by reducing the number of valid trajectories removed; we report them as auxiliary metrics.

# 4.3 Judge Performance

In Table 1, we provide an overview of the performance of judges across benchmarks using the metrics defined in Section 4.2. We find that GPT-4o and Claude 3.7 Sonnet-based simplified judges achieve higher precision compared to prior approaches, indicating that removing the internal LLMs for captioning or summarizing changes does not hinder their capabilities. Notably, no judge consistently stands out across benchmarks, highlighting the importance of selecting an appropriate LLM backbone based on the nature of the task.
+ +Low precision limits existing judges We notice that no judge achieves above $70\%$ precision, which means that $30\%$ of trajectories are erroneously marked as successful. This severely limits the usefulness of the judges for downstream applications, such as using the filtered trajectories for finetuning an agent, as the agent will learn to generate incorrect trajectories for a substantial portion of the tasks. This indicates LLM judges are currently not a reliable way of assessing the true capabilities of agents. Consequently, judges will need to achieve higher precision before they are useful for automatic evaluation, which also affects their downstream utility for methods like RFT and RL. + +Official rule-based evaluation underestimates success Similar to LLM judges, the rule-based evaluation used by benchmarks can be compared with expert annotations. Since they use task-specific configurations to determine success, they may reject successful trajectories due to inconsequential differences. For instance, in WebArena, if a user asks "What's the closest national park to the largest city in Maine?", the agent may reply: "The closest national park to Portland [...] is Acadia National Park". Rule-based evaluation considers it unsuccessful since the configuration requires it to exactly match "Acadia National Park". As a result, the rule-based approach achieves a recall of $55.9\%$ , indicating a higher rate of false negatives compared to LLM judges. Overall, a substantial precision gap exists between rule-based methods and LLM judges, but rule-based methods severely underestimate the true performance of web agents, highlighting the need for more flexible automatic evaluation. + +Impact of Input Representation Browser screenshots represent an intuitive state for humans, but LLMs may need more than vision alone, as screenshots miss page structure + +
AgentHumanGPT-4o JudgeRule-based
VWAWAWk++VWAWAWk++VWAWAWk++
Claude 3.7 S.28.355.118.434.864.120.723.930.88.1
GPT-4o35.942.318.447.850.011.517.425.64.6
Llama 3.30.022.49.20.027.65.80.018.43.5
Qwen2.5-VL21.733.313.834.852.614.917.429.511.5
+ +Table 3: Success Rate of web agents measured by expert annotators, GPT-4o Judge (with accessibility tree) and rule-based evaluation on various benchmarks (§3.2). Results by environment are in Table 6. + +and hidden attributes found in accessibility trees. To investigate the impact of different representations, we ablate our GPT-4o-mini simplified judge in Table 2. We observe that only including screenshots achieves a high precision for success and repetition, whereas only including accessibility trees allows higher recall. Surprisingly, including both accessibility trees and screenshots yields a lower performance than including only the screenshot, indicating that more information distracts rather than assists the judge. + +# 5 Revisiting how we evaluate task success rate + +One of the core applications of LLM judges is to estimate the success rate on a web navigation benchmark, which is useful in scenarios where there are no dedicated functions to calculate the rule-based success rate, which is the standard evaluation for many web agent benchmarks. However, rule-based approaches may not always agree with experts. In Table 3, we compare the success rate calculated from expert annotations, rule-based evaluation, and a GPT-4o judge with accessibility trees. + +Rule-based evaluation does not reflect expert-defined success rates We notice a stark difference between the judge and rule-based approach: whereas the LLM judge tends to overestimate the success rate of every agent (with two exceptions in WorkArena++) rule-based methods consistently underestimate it. Moreover, the underestimation varies substantially, with the performance of GPT-4o being $16.7\%$ lower on WebArena and $18.5\%$ lower on VWA compared to expert annotations. This highlights a major discrepancy between the official task success rate reported by rule-based methods and the success rate according to expert annotators. 
For instance, rule-based evaluation ranks Qwen2.5-VL above GPT-4o on WebArena and WorkArena++ (and equally on VWA), whereas expert annotators prefer GPT-4o over Qwen2.5-VL on all benchmarks, with over $14\%$ higher success rate on VWA. Overall, this stresses the need to develop new methods to calculate task success rate that more precisely reflect expert judgments. + +# 6 Error Analysis + +In this section, we qualitatively examine failure cases of LLM judges. Following a GPT-4o judge, we focus on the common error categories to understand how LLM judges fail. + +Grounding mismatch When an agent misunderstands what is happening on the screen, its thought process may not reflect the reality of the webpage. In such cases, a judge without access to the screenshots may produce an erroneous judgment due to the agent misunderstanding what is happening on the screen. For example, when a user requests to show "the product page for the item in the second row, second column," the Qwen2.5-VL agent ends up selecting the second item in the first row. However, it writes in its reasoning chain that "Based on the layout of the page, the second row, second column item is the [energy Drink]." The judge accepts the agent's thought process without realizing it opened the wrong page: "The agent's goal was to navigate to the product page for the item in the second row, second column. The agent successfully reached the correct product page." + +Misleading agent reasoning The agent may have misleading elements in its reasoning, leading the judge to reason that the agent completed the task correctly. In a multi-step + +![](images/8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg) +Figure 3: Example of judge error (discussed in Section 6). In this example, the user requests the agent to buy cereal in VisualWebArena, but the agent stops after adding it to the cart. 
The judge erroneously identified the trajectory as successful, even though the agent missed an important detail in the instruction. 
+
+![](images/bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg)
+
+WorkArena++ task, the user requested the agent to apply a search filter to include a unique ID. After several unsuccessful attempts, the agent ended up stating it succeeded in its reasoning chain, even though no filter was applied. The judge was misled by the agent and wrote in its own reasoning chain that "The agent successfully [...] applied the filter to extract entries..." before incorrectly indicating that the trajectory was successful. 
+
+Missed instruction details In some cases, the agent does not complete the task completely, missing crucial details from the instruction (see Figure 3). For example, when the user requests to "buy the cheapest cereal with a graphic character on the box in the Cereals category," the agent finds the correct product and informs the customer: "I've found the cheapest cereal with a graphic character on the box. It's Cocoa Puffs, 10.4 Oz Box..." However, it missed a crucial detail: the user requested that they buy the product. Unfortunately, the judge mistakenly believes that the agent completed a purchase: "The agent successfully identified and purchased the cheapest cereal with a graphic character on the box..." 
+
+Misunderstanding action intents We found that the agent may misuse certain actions, leading the trajectory to fail very close to completion. In such cases, the LLM judge may incorrectly decide that the trajectory is successful and ignore the misused action. In one instance where the goal was to answer "What is the minimum travel time by car from Animal Rescue League of Pittsburgh to Schenley park?", the Qwen2.5-VL agent completes all required actions, but ends up reporting the task as unfeasible instead of replying to the user. 
The GPT-4o judge (with screenshot) correctly reasons that the travel time was shown on the screen, but does not point out that reporting the task as unfeasible is incorrect, instead asserting that "all actions were confined to the task of finding the travel time." + +Overall, current LLM judges are limited by their capability to detect nuanced issues within trajectories, as shown by the judge missing details and misunderstanding an action. Moreover, they will easily agree with the agent's reasoning even when it is wrong, which has been previously observed in LLMs (Sharma et al., 2023). Future research should aim to address these issues to improve the performance of LLM judges for evaluating web agents. + +# 7 Conclusion + +We introduce AGENTREWARDBENCH, a benchmark for evaluating LLM judges for web agent trajectories. The benchmark consists of over 1300 trajectories, each annotated by experts across three dimensions: whether the agent succeeded, whether it caused unintended side effects, and whether it repeated unnecessary actions. We evaluate 12 LLM judges on AGENTREWARDBENCH and find that simpler input representation can achieve + +higher agreement with expert annotators compared to prior approaches. Moreover, we find that rule-based evaluation, often used by environment-based benchmarks, does not achieve a lower-than-expected agreement with experts. Instead, it tends to reject many valid trajectories, which results in the success rate of certain web agents being lower than what an expert would perceive. Overall, we believe our benchmark will help researchers design better LLM judges for web agents trajectories, which will enable the design of automatic evaluators and reward models that better reflect expert judgments. + +# Acknowledgments + +Xing Han Lu acknowledges the support of the Natural Sciences and Engineering Research Council of Canada (NSERC) [funding reference no. 579403]. The project is supported by the Google-Mila grant. 
We thank Alexandre Lacoste, Shikhar Murty, and the McGill NLP group members for helpful discussions. 
+
+# References
+
+Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. URL https://arxiv.org/abs/2303.08774.
+Adept. Act-1: Transformer for actions, 2022. URL https://www.adept.ai/blog/act-1/.
+Ahmad Al-Dahle. The future of ai: Built with llama, December 2024. URL https://ai.meta.com/blog/future-of-ai-built-with-llama/.
+Anthropic. Introducing computer use, a new claude 3.5 sonnet, and claude 3.5 haiku, 2024a. URL https://www.anthropic.com/news/3-5-models-and-computer-use.
+Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024b. URL https://api.semanticscholar.org/CorpusID:268232499.
+Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report, 2025. URL https://arxiv.org/abs/2502.13923.
+Léo Boisvert, Megh Thakkar, Maxime Gasse, Massimo Caccia, Thibault Le Sellier De Chezelles, Quentin Cappart, Nicolas Chapados, Alexandre Lacoste, and Alexandre Drouin. Workarena++: Towards compositional planning and reasoning-based common knowledge work tasks, 2025. URL https://arxiv.org/abs/2407.05291.
+Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym, 2016. URL https://arxiv.org/abs/1606.01540.
+Dongping Chen, Ruoxi Chen, Shilin Zhang, Yinuo Liu, Yaochen Wang, Huichi Zhou, Qihui Zhang, Yao Wan, Pan Zhou, and Lichao Sun. Mllm-as-a-judge: Assessing multimodal llm-as-a-judge with vision-language benchmark, 2024. 
URL https://arxiv.org/abs/2402.04788. +Thibault Le Sellier De Chezelles, Maxime Gasse, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, Sahar Omidi Shayegan, Lawrence Keunho Jang, Xing Han Lu, Ori Yoran, Dehan Kong, Frank F. Xu, Siva Reddy, Quentin Cappart, Graham Neubig, Ruslan Salakhutdinov, Nicolas Chapados, and Alexandre Lacoste. The browsergym ecosystem for web agent research, 2025. URL https://arxiv.org/abs/2412.05467. +Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36:28091-28114, 2023. + +Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H. Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, Nicolas Chapados, and Alexandre Lacoste. Workarena: How capable are web agents at solving common knowledge work tasks?, 2024. URL https://arxiv.org/abs/2403.07718. +Aarash Feizi, Sai Rajeswar, Adriana Romero-Soriano, Reihaneh Rabbany, Valentina Zantedeschi, Spandana Gella, and João Monteiro. Pairbench: Are vision-language models reliable at comparing what they see?, 2025. URL https://arxiv.org/abs/2502.15210. +Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint, 2024. URL https://arxiv.org/abs/2407.21783. +Izzeddin Gur, Ulrich Rueckert, Aleksandra Faust, and Dilek Hakkani-Tur. Learning to navigate the web. arXiv preprint arXiv:1812.09195, 2018. +Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856, 2023. +Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. 
Webvoyager: Building an end-to-end web agent with large multimodal models. ArXiv, abs/2401.13919, 2024. URL https://api.semanticscholar.org/CorpusID:267211622.
+Peter C Humphreys, David Raposo, Toby Pohlen, Gregory Thornton, Rachita Chhaparia, Alistair Muldal, Josh Abramson, Petko Georgiev, Alex Goldin, Adam Santoro, and Timothy Lillicrap. A data-driven approach for learning to control computers, 2022. URL https://arxiv.org/abs/2202.08137.
+Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.
+Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. VisualWebArena: Evaluating Multimodal Agents on Realistic Visual Web Tasks, June 2024. URL http://arxiv.org/abs/2401.13649. arXiv:2401.13649 [cs].
+Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, and Jie Tang. Autowebglm: A large language model-based web navigating agent, 2024. URL https://arxiv.org/abs/2404.03648.
+Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, Noah A. Smith, and Hannaneh Hajishirzi. Rewardbench: Evaluating reward models for language modeling, 2024. URL https://arxiv.org/abs/2403.13787.
+Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, Tianlin Shi, and Percy Liang. Reinforcement Learning on Web Interfaces using Workflow-guided Exploration. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings, 2018. URL https://openreview.net/forum?id=ryTp3f-0-.
+Xing Han Lu, Zdenek Kasner, and Siva Reddy. Weblinx: Real-world website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930, 2024. 
+Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: Bootstrapping Agents by Guiding Exploration with Language, June 2024. URL http://arxiv.org/abs/2403.08140.arXiv:2403.08140 [cs]. + +Shikhar Murty, Hao Zhu, Dzmitry Bahdanau, and Christopher D. Manning. Nnetnav: Unsupervised learning of browser agents through environment interaction in the wild, 2025. URL https://arxiv.org/abs/2410.02907. +Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332, 2021. +OpenAI. Introducing operator, January 2025. URL https://openai.com/index/introducing-operator. +Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents, 2024. URL https://arxiv.org/abs/2404.06474. +Ajay Patel, Markus Hofmacher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks, 2024. URL https://arxiv.org/abs/2405.20309. +Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents, 2024. URL https://arxiv.org/abs/2408.07199. +Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. +Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. 
Towards understanding sycophancy in language models, 2023. URL https://arxiv.org/abs/2310.13548. +Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=3PjCt4kmRx. +Tianlin Shi, Andrej Karpathy, Linxi Fan, Jonathan Hernandez, and Percy Liang. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, pp. 3135-3144. PMLR, 2017. +Robert St. Amant and Luke S. Zettlemoyer. The user interface as an agent environment. In Proceedings of the Fourth International Conference on Autonomous Agents, AGENTS '00, pp. 483-490, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581132301. doi: 10.1145/336595.337575. URL https://doi.org/10.1145/336595.337575. +Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, and Ruslan Salakhutdinov. Towards internet-scale training for agents, 2025. URL https://arxiv.org/abs/2502.06776. +Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. WebShop: Towards Scalable Real-world Web Interaction with Grounded Language Agents. In NeurIPS, 2022. URL https://arxiv.org/abs/2207.01206. +Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks?, 2024. URL https://arxiv.org/abs/2407.15711. +Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(ision) is a generalist web agent, if grounded, 2024. URL https://arxiv.org/abs/2401.01614. + +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 
Judging llm-as-a-judge with mt-bench and chatbot arena, 2023. URL https://arxiv.org/abs/2306.05685. +Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. WebArena: A Realistic Web Environment for Building Autonomous Agents, April 2024. URL http://arxiv.org/abs/2307.13854. arXiv:2307.13854 [cs]. +Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, Yangyang Shi, Vikas Chandra, and Jürgen Schmidhuber. Agent-as-a-judge: Evaluate agents with agents, 2024. URL https://arxiv.org/abs/2410.10934. + +# A Benchmark + +# A.1 Environment and Experiments Details + +AssistantBench Although an unlimited number of websites can be visited, we observed that the agents visited a total of 66 unique domains between 1 and 129 times across all trajectories we collected. The number of times a domain was visited can be found in Table 4. Additionally, we replace the default search engine with an alternative search engine (https://duckduckgo.com) as the original homepage blocks browser automation, which renders the tasks unachievable. + +Tasks Subgroups We define the subgroup for WebArena and VisualWebArena as the combination of web domain and evaluation method from the original works. The evaluation methods consist of string matching, HTML-based programs, webpage image querying, and final URL matching. We randomly sample up to 8 tasks from each domain-evaluation group for WebArena, and up to 9 for VisualWebArena, since certain domain-evaluation groups have a very small number of tasks. For WorkArena, we attempt to evenly distribute the task categories. 
As a result, we have the following task distributions: + +- WebArena: Wikipedia (8), Map (18), Reddit (18), Shopping Admin (18), Shopping (19), Gitlab (19) +- VisualWebArena: Wikipedia (17), Reddit (27), Classifieds (28), Shopping (28) +- WorkArena: Sophisticated memory (15), Information retrieval (20), Contextual understanding infeasible tasks (21), Planning and problem solving (22), Data driven decision making and reasoning (22) + +Agent Hyperparameters The binary flags used in AgentLab (Chezelles et al., 2025) are shown in Table 5. We set a maximum limit of 40K input tokens and 8192 output tokens. + +Agent Platform Implementation In addition to abstracting websites and browser engines into Gym-compatible environments (Brockman et al., 2016), BrowserGym (Drouin et al., 2024; Chezelles et al., 2025) offers advanced preprocessing of complex web inputs (i.e., DOM and accessibility trees) and can automatically parse LLM output and execute them as browser actions like clicks, form inputs, tab actions, etc. Additionally, the BrowserGym ecosystem includes AgentLab, a framework for processing input representation and managing web agent experiments. We use AgentLab to design our representation function $R$ , ensuring unified hyperparameters and inputs. As a result, we can avoid unintended differences that may arise from customizing prompts and representations for each LLM. + +# A.2 Annotations + +Trajectory filtering In total, 351 tasks were considered across 5 benchmarks (33 in AssistantBench, 100 in VisualWebArena, 100 in WebArena, 18 in WorkArena, and 100 in WorkArena++) We collect trajectories from agents built from each of three multimodal models: Claude 3.7 Sonnet, GPT-4o, Qwen2.5-VL. Moreover, since Llama 3.3 is not multimodal, we only collect trajectories on 251 tasks (excluding VisualWebArena). Additionally, Llama 3.3 did not complete two WebArena tasks (nos. 
735 and 805) due to timeout issues that consistently occurred in the environment, despite multiple attempts to restart. Thus, we obtain a total of 1302 trajectories, where 196 are stored in the development split and 1106 in the test split. 
+
+Interface To annotate the trajectories, we designed a fully customized annotation interface using Gradio (see Figure 4). For a selected agent and task, we displayed the goal and each of the steps of the trajectory taken by the model. It shows the model's reasoning and action, as well as a screenshot with the action element on overlay. Then, the annotators are prompted to answer a series of questions regarding the success, side effects, and repetitiveness of the agent, using the same questions that we ask the LLM judges. 
+
+Shared Knowledge Given that the annotators are experts, it is possible that the annotators share knowledge of web agents that non-experts may not possess; we identify several shared 
+
+knowledge facts. (1) web agent design and capabilities: the annotators are aware that the agents are designed with LLMs, some of which have multimodal capabilities, and that they are capable of generating reasoning traces to support actions, and that the LLMs may be subject to hallucination or may produce repetitive sequences of text. (2) dedicated web environments: the annotators know that several of the websites used in the project come from prior publications in the domain, including WebArena (Zhou et al., 2024), VisualWebArena (Koh et al., 2024), WorkArena (Drouin et al., 2024; Boisvert et al., 2025). They are aware that some of the websites are designed specifically for the task, whereas others come from real-world websites. (3) Automatic Evaluation: the annotators know that the web environments employ automatic evaluation methods, such as string matching and URL matching, to evaluate the agents. 
Thus, a task that is successful or unsuccessful may terminate earlier, but the agent will not be guaranteed to receive a positive reward for that task. + +Annotator agreements and disagreements resolution For most tasks, binary annotations can be produced. However, in some cases, the annotator may not be certain of their annotation, and are allowed to mark a trajectory as uncertain, which was subsequently reviewed by the other annotators. In some cases, annotators may disagree with their judgments. In general, a salient reason for mismatch is the ambiguity of the instructions. For example, a task instruction might mention "buy a black t-shirt", but may not specify if it is fully black or can have other graphics. In such cases, annotators are advised to go for the most lenient option. More generally, to ensure that annotators can easily voice their uncertainty and disagreement, the first half of the annotation was conducted in person with all annotators present concurrently. Thus, when an annotator was uncertain about the annotation for a trajectory, they can ask other annotators, who can deliberate about the correct annotation until a consensus is reached. This approach further allows other annotators to align to the consensus for the remaining annotations. + +# A.3 LLM Judges + +**Prompts** We use simple system prompt (Figure 5) and user message (Figure 6) templates without model-specific commands, allowing our prompt to be transferred to any LLM. We use distinct tags, such as and , to facilitate parsing the model output. + +Results We report extended results for 10 LLM judges, with the overall results in Table 7 and the finegrained results in Table 8 over all agents; the unaggregated results are presented in Tables 9 to 12. 
+ +![](images/67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg) + +![](images/b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg) +Figure 4: User Interface used by annotators for answering questions + +![](images/3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg) + +
Domain#Domain#Domain#
duckduckgo.com129google.com112wizards.com24
blackbaudhosting.com21fedex.com17mtggoldfish.com17
usps.com12fidelity.ca12weather.gov10
yelp.com9linkedin.com9rottentomatoes.com9
nih.gov8tcgplayer.com8imdb.com8
yahoo.com7cagreatamerica.com7thedrinknation.com6
tripadvisor.com6express.dhl6californiagreatamerica.com5
seattlechildrensmuseum.org5monday.com5fubo.tv5
philamuseum.org5weatherspark.com5bing.com5
ensembl.org4wellhub.com4hubbioo.com3
wholefoodsmarket.com3alltrails.com3target.com2
andersmartialarts.com2wikipedia.org2sfyimby.com2
currentresults.com2stockanalysis.com2speakrj.com2
x.com2apple.com2extremeweatherwatch.com2
tmplclubs.com2sixflags.com1etf.com1
amazon.com1netflixreleases.com1weather-and-climate.com1
wunderground.com1redfin.com1talesofamountainmama.com1
themeparkcenter.com1seattleweatherblog.com1chromewebdata1
peacefoodnyc.com1sec.gov1calicolabs.com1
easyship.com1onlineshippingcalculator.com1tripadvisor.ca1
nyunews.com1fandango.com1aimobo.io1
anytots.com1morningstar.com1visitphilly.com1
+ +Table 4: AssistantBench Website Visit Counts + +
ValueFlags
Trueuseifax, use_som, use-thinking, use_concrete_example, use_ABSTRACT_example, use_hints, be_cautious
Falseuse_html, usepast_error_logs, use Think_history, use_diff, filter Visible_elements_only, long_description, individual/examples, use_plan, use_criticise, use_memory, enable chatting
+ +Table 5: Agentlab Hyperparameters + +
BenchmarkAgentExpertLLM JudgeRule-based
AssistantBenchClaude 3.7 S.11.111.10.8
GPT-4o14.814.83.7
Llama 3.33.77.45.3
Qwen2.5-VL0.00.02.2
WebArenaClaude 3.7 S.55.164.130.8
GPT-4o42.350.025.6
Llama 3.322.427.618.4
Qwen2.5-VL33.352.629.5
VisualWebArenaClaude 3.7 S.28.334.823.9
GPT-4o35.947.817.4
Qwen2.5-VL21.734.817.4
WorkArenaClaude 3.7 S.68.868.850.0
GPT-4o50.056.250.0
Llama 3.356.250.056.2
Qwen2.5-VL56.256.256.2
WorkArena++Claude 3.7 S.18.420.78.1
GPT-4o18.411.54.6
Llama 3.39.25.83.5
Qwen2.5-VL13.814.911.5
OverallClaude 3.7 S.33.038.020.4
GPT-4o31.335.316.3
Llama 3.317.017.513.3
Qwen2.5-VL22.331.719.5
+ +Table 6: Success Rate by evaluation type. For the LLM judge, we use GPT-4o with accessibility trees. + +
JudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AER-C67.771.969.7------
AER-V67.671.569.5------
Claude 3.7 S. (A)68.881.674.714.034.720.082.894.988.4
Claude 3.7 S. (S)69.476.372.714.144.421.482.094.587.8
Functional83.855.967.1------
GPT-4o (A)69.883.175.97.791.714.280.496.987.9
GPT-4o (S)68.180.373.77.590.313.879.296.286.9
GPT-4o Mini (A)61.586.171.77.270.813.078.646.458.3
GPT-4o Mini (S)64.578.370.86.631.911.092.318.530.8
Llama 3.3 (A)67.779.072.96.979.212.780.191.685.5
NNetNav52.582.464.1------
Qwen2.5-VL (A)64.389.875.09.055.615.488.172.679.6
Qwen2.5-VL (S)64.586.173.78.858.315.288.764.674.7
+ +Table 7: Results over all benchmarks by judge. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
+ +Table 8: Finegrained results by benchmark and judge for all agents. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
+ +Table 9: Finegrained results by benchmark and judge for Qwen2.5-VL agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
+ +Table 10: Finegrained results by benchmark and judge for Llama 3.3 agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
+ +Table 11: Finegrained results by benchmark and judge for Claude 3.7 Sonnet agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
+ +Table 12: Finegrained results by benchmark and judge for GPT-4o agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics. + +# System Prompt Template + +You are a Language Model specialized in judging the performance of web agents in web navigation tasks. For a certain website, you are given the goal of a navigation task, the current URL of the webpage, the actions taken by the agent, and the thought process of the agent. Additionally, you will have access to the first and last screenshots, as well as the last accessibility tree. Your task is to answer several questions about the agent's performance in the task. + +Question 1: Was the sequence of actions below successful in achieving the goal? +Choices: Successful, Unsuccessful + +Question 2: Did the agent perform unnecessary actions that could lead to unintended side effects? +Choices: Yes, No + +Question 3: Did the agent perform the task optimally, by only performing necessary actions and avoiding unnecessary ones? + +Choices: 1. Complete Failure, 2. Suboptimal, 3. Somewhat Optimal, 4. Completely Optimal + +Question 4: Did the agent loop through a sequence of actions that did not make progress towards the goal? +Choices: Yes, No + +You should provide your reasoning process for each question, and you can refer to the screenshots and the accessibility tree to support your answers. Your answer must follow the following format: + +your reasoning here answer answer answer answer + +Figure 5: System Prompt Template used for the simplified judge + +# User Prompt Template + +```txt +The user goal is: {goal} +The agent performed the following actions: +Step: {step_number} +URL: {url} +Action: {action} +Reasoning: {reasoning} +----- +The last accessibility tree is: +{axtree} +Here is the screenshot of the last step. +{screenshot} +Provide your reasoning and answer the four questions from the system prompt, using +the specified format. 
+``` + +Figure 6: User Prompt Template used for the simplified judge \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08942/images/307853ee995fe2de7274f47458079ca5894b5e3b2a5641a08611b24b94c6ed5a.jpg b/data/2025/2504_08xxx/2504.08942/images/307853ee995fe2de7274f47458079ca5894b5e3b2a5641a08611b24b94c6ed5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61a6f218938d1bf0098446faee324082679b8ee6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/307853ee995fe2de7274f47458079ca5894b5e3b2a5641a08611b24b94c6ed5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f027cd54c08e89bbac84e69fe1a503a05f14bb21fba921dc24d0e49270e3f6b +size 99044 diff --git a/data/2025/2504_08xxx/2504.08942/images/3638d2893524c1d751349fe73edd5030d584b750e19926704cc9ef9bb7bf8899.jpg b/data/2025/2504_08xxx/2504.08942/images/3638d2893524c1d751349fe73edd5030d584b750e19926704cc9ef9bb7bf8899.jpg new file mode 100644 index 0000000000000000000000000000000000000000..559744a3f1eaa71c7942e596c778cd18228b115b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/3638d2893524c1d751349fe73edd5030d584b750e19926704cc9ef9bb7bf8899.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639af7531b20d3456235c771cddce02f5a4df05f42fbd5bcf9fbdd2bb45acc8b +size 288841 diff --git a/data/2025/2504_08xxx/2504.08942/images/3b3b523d52ea3d449263831d809c2bca3078fa57ca50ab960a8cb6a967fc27a2.jpg b/data/2025/2504_08xxx/2504.08942/images/3b3b523d52ea3d449263831d809c2bca3078fa57ca50ab960a8cb6a967fc27a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62b018857ff5dbe6ab19ea6338023bff1adb81a4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/3b3b523d52ea3d449263831d809c2bca3078fa57ca50ab960a8cb6a967fc27a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28cffe21bd701e33f91fe1830f2588b2e4d243a800c919e02c90cd893a645aba +size 88106 diff --git 
a/data/2025/2504_08xxx/2504.08942/images/3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg b/data/2025/2504_08xxx/2504.08942/images/3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0158f2e2796c3990c0c3dd131cc387b0fcb2a48 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ee5ec2317d6fa075ddc41492ae49f49349087d0bc84c1cb36842eaaf4840caf +size 31855 diff --git a/data/2025/2504_08xxx/2504.08942/images/3fcd6d6c6c53ef5d655d6e0ff13f9bfff714b04e8a95435c83bca1a6b39cc8ae.jpg b/data/2025/2504_08xxx/2504.08942/images/3fcd6d6c6c53ef5d655d6e0ff13f9bfff714b04e8a95435c83bca1a6b39cc8ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04afd7d8d77ce37688ede5a6a3f314772b37d9cc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/3fcd6d6c6c53ef5d655d6e0ff13f9bfff714b04e8a95435c83bca1a6b39cc8ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36ba4a71889840b1c90891d62cce405eec3aff7a1423415264af08d874d00923 +size 100031 diff --git a/data/2025/2504_08xxx/2504.08942/images/44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg b/data/2025/2504_08xxx/2504.08942/images/44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba45d4477de2df640d5cc7ea70ffcb8b76355dd2 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb2961118f021b5ab7f27da6473ae12b5eb401cae2f0ef1f54ca94ac8741255a +size 91446 diff --git a/data/2025/2504_08xxx/2504.08942/images/4863bb5ba625a6f6e63f09af52fe2c622bd6c22434726514333cd02a82329430.jpg 
b/data/2025/2504_08xxx/2504.08942/images/4863bb5ba625a6f6e63f09af52fe2c622bd6c22434726514333cd02a82329430.jpg new file mode 100644 index 0000000000000000000000000000000000000000..818aa98e9f42fbea2b9dc0dc697d5765211e0ddc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/4863bb5ba625a6f6e63f09af52fe2c622bd6c22434726514333cd02a82329430.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:694a03274cebec1e1325df686c09395f339d64c8cc61bafee93fe09630a34b9a +size 287655 diff --git a/data/2025/2504_08xxx/2504.08942/images/55cf8ed31386a867855e3097e6c2a90acd7ed00a9c2e4344bc24ab3f9d7afdf9.jpg b/data/2025/2504_08xxx/2504.08942/images/55cf8ed31386a867855e3097e6c2a90acd7ed00a9c2e4344bc24ab3f9d7afdf9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..818aa98e9f42fbea2b9dc0dc697d5765211e0ddc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/55cf8ed31386a867855e3097e6c2a90acd7ed00a9c2e4344bc24ab3f9d7afdf9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:694a03274cebec1e1325df686c09395f339d64c8cc61bafee93fe09630a34b9a +size 287655 diff --git a/data/2025/2504_08xxx/2504.08942/images/5632a9bac292c4bee36a707181d78d6ad711c2e7f7fcd6e2c8dde1e3c012afc3.jpg b/data/2025/2504_08xxx/2504.08942/images/5632a9bac292c4bee36a707181d78d6ad711c2e7f7fcd6e2c8dde1e3c012afc3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f156d037bd82d908ba8ece67dbb6a450f7e03fc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/5632a9bac292c4bee36a707181d78d6ad711c2e7f7fcd6e2c8dde1e3c012afc3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0421683d306745e0e0fd18f5194a342a363f258b48293e9c05f925c99d6bf718 +size 41394 diff --git a/data/2025/2504_08xxx/2504.08942/images/67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg b/data/2025/2504_08xxx/2504.08942/images/67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..7c3ac306b96e3842c80cf55243a8bedc4bd64985 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4816167ed20f1f45343c3339ce0e7f69d8c26e08c6a1014315054012fb81ae6f +size 50517 diff --git a/data/2025/2504_08xxx/2504.08942/images/77007d4343c7aab2936fe569092fef7ca49b6622819e1c83f346774f57a1305e.jpg b/data/2025/2504_08xxx/2504.08942/images/77007d4343c7aab2936fe569092fef7ca49b6622819e1c83f346774f57a1305e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..559744a3f1eaa71c7942e596c778cd18228b115b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/77007d4343c7aab2936fe569092fef7ca49b6622819e1c83f346774f57a1305e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639af7531b20d3456235c771cddce02f5a4df05f42fbd5bcf9fbdd2bb45acc8b +size 288841 diff --git a/data/2025/2504_08xxx/2504.08942/images/785c9e954ce36bf5a7ab213d6ff6ad7916337d2e98fbdbfea2cb611e1577c5dd.jpg b/data/2025/2504_08xxx/2504.08942/images/785c9e954ce36bf5a7ab213d6ff6ad7916337d2e98fbdbfea2cb611e1577c5dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1564ac6bea38b74b7511f10067d0c94889698e07 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/785c9e954ce36bf5a7ab213d6ff6ad7916337d2e98fbdbfea2cb611e1577c5dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:989db94cde1ce9900e7691221701ba73459624b297201cc94d46799782724f9f +size 30949 diff --git a/data/2025/2504_08xxx/2504.08942/images/79bcf03814eb760d5fc8e0eb52317dc44bb68f0d0eb22fcbf1d8db118ab2505d.jpg b/data/2025/2504_08xxx/2504.08942/images/79bcf03814eb760d5fc8e0eb52317dc44bb68f0d0eb22fcbf1d8db118ab2505d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0dda6d9e61290f22db50c6fd40883843d29762d --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08942/images/79bcf03814eb760d5fc8e0eb52317dc44bb68f0d0eb22fcbf1d8db118ab2505d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aca90a01fcd9d9759ab35512c2a128df75cfc36aba01cf7d41e44cbe6db92f82 +size 99002 diff --git a/data/2025/2504_08xxx/2504.08942/images/7f2421a723f50fa8df7200781d075cb0c69a98bd3470aca8dc2f648844c87aab.jpg b/data/2025/2504_08xxx/2504.08942/images/7f2421a723f50fa8df7200781d075cb0c69a98bd3470aca8dc2f648844c87aab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..818aa98e9f42fbea2b9dc0dc697d5765211e0ddc --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/7f2421a723f50fa8df7200781d075cb0c69a98bd3470aca8dc2f648844c87aab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:694a03274cebec1e1325df686c09395f339d64c8cc61bafee93fe09630a34b9a +size 287655 diff --git a/data/2025/2504_08xxx/2504.08942/images/7f48a6d6dffbee0c753c2cd9ee9598cf1c13ea70fb6596793215d0a51a81fe4d.jpg b/data/2025/2504_08xxx/2504.08942/images/7f48a6d6dffbee0c753c2cd9ee9598cf1c13ea70fb6596793215d0a51a81fe4d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8da898323824c1790596b4e541a115566e26b61a --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/7f48a6d6dffbee0c753c2cd9ee9598cf1c13ea70fb6596793215d0a51a81fe4d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16a536fc516d6e0e6a872f0e0382aa09be4bff4aeda2e4ffc41dd4121f4a5b0c +size 44169 diff --git a/data/2025/2504_08xxx/2504.08942/images/8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg b/data/2025/2504_08xxx/2504.08942/images/8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ad91270f878fd737ae091dedce342cfee3c9e60 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:462ab12e418983447363da171988d0a77cab7339be876a2519a54e9dc17918b7 +size 48306 diff --git a/data/2025/2504_08xxx/2504.08942/images/b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg b/data/2025/2504_08xxx/2504.08942/images/b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9b1edac230b11fcbcaf73af048ff9fcd5b80fb1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ec4d314154a29c19460017ef59b2e0c253cf77922568c5cb48475c4ddf26abe +size 28108 diff --git a/data/2025/2504_08xxx/2504.08942/images/bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg b/data/2025/2504_08xxx/2504.08942/images/bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2874459960ca9526d6e9ec751a1d28455628326 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:581363a0725589f858427826d23cf77a7c8760cfb204df6d92b3a10dcc0f180b +size 46875 diff --git a/data/2025/2504_08xxx/2504.08942/images/fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg b/data/2025/2504_08xxx/2504.08942/images/fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7c7a1bd30dbb97c70a979095fd41062f7fa86a1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/images/fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eaa3d316d3019c239eb174737056460b3f2da874ad20877226f1b14a8f50e28 +size 48543 diff --git 
a/data/2025/2504_08xxx/2504.08942/layout.json b/data/2025/2504_08xxx/2504.08942/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..38aa84c9a49f3e61957510191340dcc00527b182 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08942/layout.json @@ -0,0 +1,8613 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 111 + ], + "type": "text", + "content": "AGENTREWARDBENCH: Evaluating Automatic Evaluations of Web Agent Trajectories" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 131, + 323, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 131, + 323, + 144 + ], + "spans": [ + { + "bbox": [ + 110, + 131, + 323, + 144 + ], + "type": "text", + "content": "Xing Han Lu12 Amirhossein Kazemnejad*2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 144, + 469, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 144, + 469, + 156 + ], + "spans": [ + { + "bbox": [ + 110, + 144, + 469, + 156 + ], + "type": "text", + "content": "Nicholas Meade12 Arkil Patel12 Dongchan Shin2 Alejandra Zambrano2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 156, + 471, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 156, + 471, + 169 + ], + "spans": [ + { + "bbox": [ + 110, + 156, + 471, + 169 + ], + "type": "text", + "content": "Karolina Stanczak12 Peter Shaw4 Christopher J. 
Pal2567 Siva Reddy1257" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 169, + 496, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 169, + 496, + 182 + ], + "spans": [ + { + "bbox": [ + 110, + 169, + 496, + 182 + ], + "type": "text", + "content": "*Core contributor ¹McGill University ²Mila Quebec AI Institute ⁴Google DeepMind" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 182, + 452, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 182, + 452, + 193 + ], + "spans": [ + { + "bbox": [ + 110, + 182, + 452, + 193 + ], + "type": "text", + "content": "5Canada CIFAR AI Chair 6Polytechnique Montréal 7ServiceNow Research" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 193, + 359, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 193, + 359, + 204 + ], + "spans": [ + { + "bbox": [ + 110, + 193, + 359, + 204 + ], + "type": "text", + "content": "xing.han.lu@mail.mcgill.ca; siva.reddy@mila.quebec" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 232, + 330, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 232, + 330, + 245 + ], + "spans": [ + { + "bbox": [ + 281, + 232, + 330, + 245 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 258, + 471, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 258, + 471, + 501 + ], + "spans": [ + { + "bbox": [ + 140, + 258, + 471, + 501 + ], + "type": "text", + "content": "Web agents enable users to perform tasks on web browsers through natural language interaction. Evaluating web agents trajectories is an important problem, since it helps us determine whether the agent successfully completed the tasks. Rule-based methods are widely used for this purpose, but they are challenging to extend to new tasks and may not always recognize successful trajectories. 
We may achieve higher accuracy through human evaluation, but the process would be substantially slower and more expensive. Automatic evaluations with LLMs may avoid the challenges of designing new rules and manually annotating trajectories, enabling faster and cost-effective evaluation. However, it is unclear how effective they are at evaluating web agents. To this end, we propose AGENTREWARD-BENCH, the first benchmark to assess the effectiveness of LLM judges for evaluating web agents. AGENTREWARD-BENCH contains 1302 trajectories across 5 benchmarks and 4 LLMs. Each trajectory in AGENTREWARD-BENCH is reviewed by an expert, who answers questions pertaining to the success, side effects, and repetitiveness of the agent. Using our benchmark, we evaluate 12 LLM judges and find that no single LLM excels across all benchmarks. We also find that the rule-based evaluation used by common benchmarks tends to underreport the success rate of web agents, highlighting a key weakness of rule-based evaluation and the need to develop more flexible automatic evaluations. We release the benchmark at: https://agent-reward-bench.github.io" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 526, + 195, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 195, + 538 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 195, + 538 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 549, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 549, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 549, + 506, + 628 + ], + "type": "text", + "content": "Giving a Large Language Model (LLM) access to a web browser unlocks an entirely new capability paradigm: beyond interacting with a user through a chat interface, such models can interact with the online world to complete tasks similar to how a human would. 
The promise of a new paradigm has motivated the design of LLMs to control interfaces such as web browsers, starting from earlier foundation models such as ACT-1 (Adept, 2022) to the more recent OpenAI Operator (OpenAI, 2025) and Claude Computer use (Anthropic, 2024a), showing promising results in real-world tasks (Zhou et al., 2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "To measure the progress of web agents, a well-designed benchmark should compile a collection of realistic tasks across diverse websites. As illustrated in Figure 1, a user may ask the agent to locate a Classifieds listing for a Google Pixel phone and submit an offer via a comment. Inside a dedicated environment (e.g., a self-hosted Classifieds site), the web agent would complete the task by filling the search bar, identifying the correct listing, and writing a comment to show interest in purchasing the item. To determine if the agent successfully completed the request, we need to automatically evaluate the agent's chosen actions – known as trajectories – using a set of rules uniquely designed for the task of finding a Pixel phone on Classifieds. 
As expected, rule-based evaluation is time-consuming for" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "type": "text", + "content": "arXiv:2504.08942v2 [cs.LG] 6 Oct 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 81, + 497, + 270 + ], + "blocks": [ + { + "bbox": [ + 113, + 81, + 497, + 270 + ], + "lines": [ + { + "bbox": [ + 113, + 81, + 497, + 270 + ], + "spans": [ + { + "bbox": [ + 113, + 81, + 497, + 270 + ], + "type": "image", + "image_path": "44813690a64ed31c86bd46805a16225d99f96e5c83308ced722d0c5e48357ff5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 278, + 504, + 310 + ], + "lines": [ + { + "bbox": [ + 104, + 278, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 278, + 504, + 310 + ], + "type": "text", + "content": "Figure 1: Example from AGENTREWARDBENCH, where an LLM judge evaluates a web agent's trajectory on VisualWebArena (Koh et al., 2024). The benchmark compares judgments against expert annotations to determine the effectiveness of the judge for evaluating web agents." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 340, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 397 + ], + "type": "text", + "content": "experts to design, and may not cover every successful scenario (e.g., what if the agent finds a different but valid listing?). It is also possible for an expert to annotate the trajectories, but it would be slow and expensive to scale across many web agents. This brings us to the following questions: Given a web agent trajectory, can an LLM decide if it is successful? If so, how do we determine which LLM is the most capable at evaluating web agents?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 401, + 506, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 506, + 491 + ], + "type": "text", + "content": "Past works have shown that LLMs can be used as judges to evaluate the output of LLM chatbots (Zheng et al., 2023). More recently, LLM judges have been used for automatically evaluating trajectories from web agents (Pan et al., 2024; Murty et al., 2025; Trabucco et al., 2025). With highly accurate automatic evaluation methods, we can measure the progress of web agents on new sets of tasks, use them to synthesize trajectories for finetuning smaller models, and design reward models that can be used in a reinforcement learning (RL) setting. However, it remains unclear whether current automatic evaluators, whether rule-based or LLM-based, can predict the success of a trajectory in a way that reflects expert judgment." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 495, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 506, + 650 + ], + "type": "text", + "content": "To address this problem, we introduce AGENTREWARDBENCH (§3), a benchmark for determining the capability of an LLM at evaluating web agents (see Figure 1). It consists of 1300 trajectories produced by 4 popular LLM agents on 5 diverse web environments, ranging from common tasks like online shopping and posting on a forum, to highly specialized requests in professional environments, such as updating task schedules on IT task management platforms. Each trajectory is labeled by expert annotators to determine whether the agent successfully completed the task, caused unintended side effects, or entered cycles of repetitive actions. Using this benchmark, we evaluate both existing and novel LLM judges (§4) alongside rule-based evaluation. We find that rule-based methods, which are used as the official automatic evaluation by environment-based benchmarks, severely underestimate the capabilities of agents and do not reflect how experts define success (§5). We further provide an in-depth analysis (§6) that highlights the weaknesses of existing LLMs when used as judges. Overall, we believe AGENTREWARDBENCH can be used to enable better automatic evaluation and reward modeling for web agents." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 663, + 206, + 676 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 663, + 206, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 663, + 206, + 676 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Web Agents and Environments Designing agents that can automatically navigate user interfaces has been a long standing problem; earlier approaches employed program-based heuristics (St. Amant & Zettlemoyer, 2000), whereas later works on web navigation focus on training reinforcement learning (RL) models (Gur et al., 2018; Humphreys et al., 2022)," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 161 + ], + "type": "text", + "content": "language models (Nakano et al., 2021; Gur et al., 2023; Deng et al., 2023) and multimodal models (Shaw et al., 2023; Lu et al., 2024; Zheng et al., 2024). To measure the advancements in web agents, various benchmarks have been proposed, with initial works proposing simplified environments (Shi et al., 2017; Liu et al., 2018) and subsequent iterations focusing on specific tasks like web shopping (Yao et al., 2022). 
More recent benchmarks focus on designing realistic environments that cover commonly used websites (Zhou et al., 2024; Koh et al., 2024) as well as specialized environments (Drouin et al., 2024; Boisvert et al., 2025)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "text", + "content": "LLM Judges Zheng et al. (2023) proposed using LLMs to predict human preferences of dialogue completion for chat models. They show that a GPT-4-based judge achieves over " + }, + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "text", + "content": " agreement with human votes on the task of selecting better completions between models pairs. Follow-up extends this framework to new modalities (Chen et al., 2024), metrics (Feizi et al., 2025) and coding agents (Zhuge et al., 2024); the latter, Agent-as-a-Judge, leverages intermediate feedback from the environment. He et al. (2024) extend the idea by using LLMs to judge trajectories from web agents, allowing them to determine task completion without human annotators, resulting in a high correlation with humans on a private subset of trajectories. To determine the quality of automatic judgments, Pan et al. (2024) evaluate four LLM judges using trajectories from a GPT-4 agent on WebArena tasks, and find that the best judge achieves " + }, + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "inline_equation", + "content": "80.6\\%" + }, + { + "bbox": [ + 106, + 166, + 506, + 344 + ], + "type": "text", + "content": " accuracy against the rule-based evaluator from WebArena. 
Unlike prior works on LLM judges, we design AGENTREWARDBENCH with trajectories from several LLM agents on diverse web benchmarks, where each one is annotated by human experts on multiple dimensions. By following a human-focused approach similar to Lambert et al. (2024), we ensure that LLM judges are evaluated against expert preferences on a wide range of scenarios." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 350, + 506, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 473 + ], + "type": "text", + "content": "Trajectory Synthesis Leveraging web environments that can be created and reset without real-world impact, recent works started to explore generating trajectories without human supervision. Leveraging LLM judges and LLM-generated tasks, trajectory synthesis can be used to bootstrap agent-judge training loops (Murty et al., 2024; 2025), to create contrastive pairs (Putta et al., 2024) for direct preference optimization (Rafailov et al., 2023), or as training data to finetune a base model (Lai et al., 2024; Patel et al., 2024; Trabucco et al., 2025). Although all the methods leverage an LLM judge, they lack a clear way of directly determining the quality of judgments, instead relying on the downstream performance improvement to validate their approach. To this end, AGENTREWARDBENCH enables researchers to choose the most appropriate LLM judge for a category of web tasks based on their effectiveness at evaluating web agents." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 487, + 255, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 255, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 255, + 498 + ], + "type": "text", + "content": "3 AGENTREWARDBENCH" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 510, + 506, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 598 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 598 + ], + "type": "text", + "content": "In this work, we introduce AGENTREWARDBENCH, a benchmark designed to assess the capabilities of LLM judges for evaluating web agents (§3.1). We curate 5 diverse web environments and tasks (§3.2) in order to collect trajectories from web agents based on 4 LLMs (§3.3). For each trajectory, a team of expert annotators carefully reviews the screenshots, actions, and the agent's reasoning chains before labeling them as either successful or unsuccessful, alongside other auxiliary labels (see Figure 2). Finally, we evaluate LLM judges (Table 1) by comparing their predictions with expert annotations to determine their effectiveness for automatic evaluation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 609, + 239, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 239, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 239, + 620 + ], + "type": "text", + "content": "3.1 Assessment Framework" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": "Trajectory Definition Let " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " be an observation of a browser at time step " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " be an action that can be executed on a webpage through a browser navigation engine " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "o_{i+1} = B(o_i, a_i)" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " be the reasoning for choosing the action. 
We define a web agent trajectory as the sequence " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\{o_1, (r_1, a_1), o_2, (r_2, a_2), \\ldots, o_{n-1}, (r_{n-1}, a_{n-1}), o_n\\}" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "o_n" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " is the final observation in the trajectory. Each observation contains a screenshot of the browser " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": ", the Document Object Model (DOM) tree representation of the browser, and an accessibility (A11Y) tree rendered from the DOM tree. For the observation to be useful for an LLM agent, we need a representation function " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " that produces " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "p_i = R(o_1, r_1, a_1, \\ldots, o_i)" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": ", which can be used as an input for an LLM. 
If the agent is multimodal, " + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 629, + 506, + 730 + ], + "type": "text", + "content": " would include" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 80, + 497, + 189 + ], + "blocks": [ + { + "bbox": [ + 113, + 80, + 497, + 189 + ], + "lines": [ + { + "bbox": [ + 113, + 80, + 497, + 189 + ], + "spans": [ + { + "bbox": [ + 113, + 80, + 497, + 189 + ], + "type": "image", + "image_path": "fea37e571b8202015c318f34a31dad1d1cf79bfd93e97315862a68c6d58c4491.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 197, + 504, + 239 + ], + "lines": [ + { + "bbox": [ + 104, + 197, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 504, + 239 + ], + "type": "text", + "content": "Figure 2: AGENTREWARDBENCH creation process. We first collect trajectories from LLM agents inside web environments using instructions from several benchmarks. Then, the trajectories are reviewed by expert annotators, who indicate if the trajectory is successful, led to side effects, and contains repetition cycles. Finally, we use the annotated trajectories to evaluate LLM judges." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": "screenshots; otherwise, it would be a textual representation of the page (e.g., accessibility tree). Then, " + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": " is given to a language model to produce a completion " + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "inline_equation", + "content": "c_i = \\mathrm{LM}(p_i)" + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": ", or " + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "inline_equation", + "content": "c_i = \\mathrm{VLM}(p_i, s_i)" + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": " in the case of a multimodal LLM. The completion is parsed by an execution function " + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": " to produce " + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "inline_equation", + "content": "(a_i, r_i) = E(c_i)" + }, + { + "bbox": [ + 104, + 270, + 504, + 316 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": "Annotation Design For each trajectory, an expert annotator reviews a goal " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": " and sequence " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\{s_1, (r_1, a_1), \\ldots, s_{n-1}, (r_{n-1}, a_{n-1}), s_n\\}" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": " in order to answer questions " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\mathcal{Q} = \\{q_1, \\ldots, q_m\\}" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": ". We consider the answers produced, " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^* = \\{a_1^*, \\ldots, a_m^*\\}" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": ", as the ground truth annotations for the trajectory, which indicate whether the agent successfully completed " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": ". 
To collect " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^*" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": ", we use the following " + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 104, + 322, + 504, + 378 + ], + "type": "text", + "content": " in the annotation guidelines:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 392, + 504, + 478 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 121, + 392, + 444, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 392, + 444, + 404 + ], + "spans": [ + { + "bbox": [ + 121, + 392, + 444, + 404 + ], + "type": "text", + "content": "1. Success: Was the sequence of actions successful in achieving the goal?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 417, + 504, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 417, + 504, + 440 + ], + "spans": [ + { + "bbox": [ + 120, + 417, + 504, + 440 + ], + "type": "text", + "content": "2. Side Effect: Did the agent perform unnecessary actions that could lead to unintended side effects?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 454, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 454, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 120, + 454, + 504, + 478 + ], + "type": "text", + "content": "3. Repetition Cycle: Did the agent loop through a sequence of actions that did not make progress towards the goal?" 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 491, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 504, + 568 + ], + "type": "text", + "content": "Agreement with respect to success is the primary criterion with which we evaluate LLM judges. The remaining can be useful as auxiliary criteria for detecting issues ahead of time. For example, if an agent purchases several irrelevant products when the user only requested one, then the trajectory would be flagged for side effects, independent of task success. A judge can also indicate the presence of a cycle, for example, if the agent repeatedly clicks on a disabled button. Both signals can be used to penalize the agent during training or steer it to another action at inference." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 504, + 643 + ], + "type": "text", + "content": "Annotation Setup The team of annotators consisted of 6 experts with a deep understanding of the tasks and environments through their research on web agents. They used a custom-built user interface that displays each trajectory with screenshots, actions, and reasoning. Rating disagreements were resolved by annotators discussing among themselves until clear annotations can be produced for ambiguous trajectories. Moreover, the annotators also have access to the environment and accessibility trees when screenshots are insufficient." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": "Judge Model Given a goal " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ", trajectory " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": " and questions " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ", a judge model returns a judgment " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ", which is an estimate of " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^*" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ". We can use " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{A}}" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": " to derive a reward in RL or to automatically evaluate web agents when " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^*" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": " is unavailable. 
To implement the judge, we need a judge-specific function " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "R_j" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": " that produces a representation of the trajectory, " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "p = R_j(o_1, r_1, a_1, \\ldots, o_n)" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "R_j =" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": " can vary substantially, ranging from a simple list of actions " + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "inline_equation", + "content": "a_1, \\ldots, a_{n-1}" + }, + { + "bbox": [ + 104, + 649, + 506, + 733 + ], + "type": "text", + "content": ", to using another LLM to process the observation history. We describe judges used in previous works and introduce a simplified judge in Section 4 and provide supplementary details in Section A.3." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 242, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 242, + 92 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 242, + 92 + ], + "type": "text", + "content": "3.2 Tasks and Environments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 102, + 506, + 158 + ], + "type": "text", + "content": "We select 5 benchmarks designed to evaluate web agents inside dedicated environments and real websites, including general-purpose (Zhou et al., 2024), vision-focused (Koh et al., 2024), real-world (Yoran et al., 2024), and enterprise-oriented (Drouin et al., 2024; Boisvert et al., 2025) tasks. In total, we curate 351 unique tasks across 8 environments and 66 websites, which we separate into 51 development and 300 test tasks (details in Section A.1)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 164, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 506, + 233 + ], + "type": "text", + "content": "WebArena (WA; Zhou et al. 2024) This benchmark comprises 6 self-hosted websites covering a wide range of domains: customer relationship management, map navigation, online encyclopedia, shopping site, social forum, and software development collaboration platform. 
Each environment is derived from real open-source projects that develop self-hosted environments for both commercial and personal usage. Each task consists of a textual goal that requires a good understanding of one or multiple environments to complete." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 239, + 505, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 505, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 505, + 307 + ], + "type": "text", + "content": "VisualWebArena (VWA; Koh et al. 2024) To complement WebArena's text-based goals, we also include VisualWebArena (VWA), a benchmark focusing on tasks that require visual reasoning to complete. For instance, a user may include an image alongside the goal, or the task could be designed to only be solved if the agent selects an item with a unique visual characteristic (e.g., purchasing a TV with the widest bezel). VWA also introduces a new online marketplace environment (Classifieds)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 504, + 357 + ], + "type": "text", + "content": "AssistantBench (AB; Yoran et al. 2024) In addition to the self-hosted environments, we consider trajectories resulting from agent execution on real-world websites. This benchmark defines tasks that require navigating the internet, starting from a search engine. Since the test set is private, we use the validation set, which consists of 33 unique tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 364, + 505, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 505, + 375 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 505, + 375 + ], + "type": "text", + "content": "WorkArena (Work; Drouin et al. 2024) and WorkArena++ (Wk++; Boisvert et al. 
2025)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 375, + 505, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 505, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 505, + 455 + ], + "type": "text", + "content": "To increase the diversity of tasks relevant to professional environments, we incorporate WorkArena (Boisvert et al., 2025), a benchmark of 18 basic tasks on ServiceNow, a software-as-a-service platform for professional workflows in the information technology (IT), human resources, and customer management domains. WorkArena++ introduces tasks with greater complexity, requiring planning and reasoning to correctly complete multiple sub-tasks. Including this alongside WorkArena allows us to evaluate judges on a wider range of task difficulty. We focus on the Level 2 tasks since Level 3 is too challenging for current agents." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 465, + 221, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 221, + 478 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 221, + 478 + ], + "type": "text", + "content": "3.3 Web Agents Design" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 485, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 504, + 510 + ], + "type": "text", + "content": "To collect trajectories on the 5 benchmarks, we design web agents using two models from major commercial providers and two open-weight LLMs." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 517, + 506, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 617 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 617 + ], + "type": "text", + "content": "LLM backbones On the commercial side, we use OpenAI's " + }, + { + "bbox": [ + 104, + 517, + 506, + 617 + ], + "type": "inline_equation", + "content": "GPT-4o^2" + }, + { + "bbox": [ + 104, + 517, + 506, + 617 + ], + "type": "text", + "content": " (Hurst et al., 2024) and Anthropic's Claude 3.7 Sonnet (Anthropic, 2024b). They are the flagship models of their respective providers, both of which offer computer-use agents powered by their LLMs, namely OpenAI Operator (OpenAI, 2025) and Anthropic Claude's Computer use (Anthropic, 2024a). We select two leading open-weights LLMs to complement the commercial LLMs: Llama-3.3-70B (Grattafori et al., 2024) and Qwen2.5-VL (Bai et al., 2025). In both cases, we choose the instruction-tuned variant, which have undergone post-training for tool-use or UI navigation. Moreover, since Llama-3.3 is a text-only model, it was excluded from VisualWebArena, which requires image-based reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": "Agent Platform By default, each LLM backbone receives an input processed by a representation function " + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": " and generates a completion " + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": " is interpreted as an action by an execution function " + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": ". To implement " + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 104, + 624, + 506, + 669 + ], + "type": "text", + "content": ", we use AgentLab and BrowserGym (Chezelles et al., 2025), an ecosystem for designing web agents using LLMs (details in Section A.1)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 675, + 506, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 675, + 506, + 699 + ], + "spans": [ + { + "bbox": [ + 104, + 675, + 506, + 699 + ], + "type": "text", + "content": "Trajectory Annotations and Splits We collect a total of 1302 trajectories from our 4 LLM-based web agents across five benchmarks. 
Based on the task split (§3.2), 196 trajectories are" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 709, + 269, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 709, + 269, + 720 + ], + "spans": [ + { + "bbox": [ + 116, + 709, + 269, + 720 + ], + "type": "text", + "content": "1https://developer.servicenow.com" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 721, + 277, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 277, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 277, + 731 + ], + "type": "text", + "content": "2We use the version gpt-4o-2024-11-20" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 110, + 80, + 503, + 258 + ], + "blocks": [ + { + "bbox": [ + 110, + 80, + 503, + 258 + ], + "lines": [ + { + "bbox": [ + 110, + 80, + 503, + 258 + ], + "spans": [ + { + "bbox": [ + 110, + 80, + 503, + 258 + ], + "type": "table", + "html": "
CategoryJudgeOverallABVWAWA PrecisionWorkWk++
PrecisionRecallF1
OfficialRule-based*83.855.967.125.085.279.0100.083.3
ExistingAER-C67.771.969.783.356.068.8100.066.7
AER-V67.671.569.583.361.267.696.459.3
NNetNav52.582.464.120.854.554.377.343.2
Ours (A)Claude 3.7 S.68.881.674.787.561.069.385.066.7
GPT-4o69.883.175.977.863.070.294.663.0
GPT-4o Mini61.586.171.780.057.963.584.249.4
Llama 3.367.779.072.975.059.668.294.362.7
Qwen2.5-VL64.389.875.072.759.363.687.260.3
Ours (S)Claude 3.7 S.69.476.372.771.464.869.385.366.7
GPT-4o68.180.373.777.860.769.993.859.6
GPT-4o Mini64.578.370.880.057.466.990.354.8
Qwen2.5-VL64.586.173.770.058.562.993.864.4
", + "image_path": "307853ee995fe2de7274f47458079ca5894b5e3b2a5641a08611b24b94c6ed5a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 266, + 504, + 299 + ], + "lines": [ + { + "bbox": [ + 104, + 266, + 504, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 504, + 299 + ], + "type": "text", + "content": "Table 1: Judge performance for predicting success, measured with precision (§4.2). We report recall and F1 as auxiliary scores. We examine two variants of the simplified judge: one with the final accessibility tree (A), and the other with the final screenshot (S). *Rule-based evaluation are included for reference." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 329, + 506, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 506, + 388 + ], + "type": "text", + "content": "in the development split and 1106 are in the test split (details in A.2). The annotators follow the process described in Section 3.1 to label all trajectories, producing a total of 3906 binary annotations. To assess agreement between annotators, we annotated the GPT-4o agent's trajectory on WebArena with a second annotator. We obtained an inter-annotator agreement of " + }, + { + "bbox": [ + 104, + 329, + 506, + 388 + ], + "type": "inline_equation", + "content": "89.3\\%" + }, + { + "bbox": [ + 104, + 329, + 506, + 388 + ], + "type": "text", + "content": " on success, indicating a high level of consistency among annotators." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 399, + 269, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 269, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 269, + 415 + ], + "type": "text", + "content": "4 LLM judges for web tasks" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 424, + 239, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 239, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 239, + 437 + ], + "type": "text", + "content": "4.1 Judge implementations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 444, + 506, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 506, + 534 + ], + "type": "text", + "content": "We consider two existing implementations of LLM judges for web agents, Agent Eval Refine (AER; Pan et al. 2024) and NNetNav (Murty et al., 2025), and introduce a simplified judge that simultaneously predicts success, side effects, and repetition. In Agent-as-a-Judge (Zhuge et al., 2024), the method assumes the judge can interact with the environment after the agent finishes executing actions, which isn't feasible when the environment state cannot be preserved or shareable across agents. Other LLM judge variants were proposed (He et al., 2024; Putta et al., 2024; Lai et al., 2024; Trabucco et al., 2025), but our three judge implementations cover major strategies for representing trajectories." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 540, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 608 + ], + "type": "text", + "content": "AER (Pan et al., 2024) The judge in this framework takes as input the sequence of agent thoughts and actions alongside the final browser state, which is either passed to a vision-enabled model as a screenshot (AER-V) or as a caption generated by a captioner model (AER-C). Then, the judge outputs its reasoning before predicting success or failure. For both the judge and captioner, we implement this method using GPT-4o, which is an overall stronger model than the GPT-4 (Achiam et al., 2023) model originally used." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 613, + 506, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 506, + 681 + ], + "type": "text", + "content": "NNNetNav (Murty et al., 2025) In this work, a Llama 3.1 70B judge receives a summary of changes across all observations and has to give a rating between 1 (worst) and 5 (best) after providing the thought process; the rating is binarized by thresholding at 4, based on the original implementation. To generate summaries, an LLM is used to describe the change between two observations based on the accessibility trees instead of screenshots. We use Llama 3.3 70B (Al-Dahle, 2024), an improved version of the original backbone." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 734 + ], + "type": "text", + "content": "Simplified judge (ours) We propose a simplified design for our judge. First, it directly answers the three questions asked to the annotators. 
This allows it to return multiple labels within a single completion. Then, we decouple the system prompt and reasoning chain from the final state representation, allowing the judge to receive either the accessibility tree or the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 126, + 80, + 483, + 152 + ], + "blocks": [ + { + "bbox": [ + 126, + 80, + 483, + 152 + ], + "lines": [ + { + "bbox": [ + 126, + 80, + 483, + 152 + ], + "spans": [ + { + "bbox": [ + 126, + 80, + 483, + 152 + ], + "type": "table", + "html": "
A11YScreenSuccessSide EffectRepetition
PRF1PRF1PRF1
62.181.770.66.531.910.892.516.828.4
X61.586.171.77.270.813.078.646.458.3
X64.578.370.86.631.911.092.318.530.8
XX60.773.966.77.276.413.278.159.167.3
", + "image_path": "785c9e954ce36bf5a7ab213d6ff6ad7916337d2e98fbdbfea2cb611e1577c5dd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "lines": [ + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 104, + 160, + 504, + 183 + ], + "type": "text", + "content": "Table 2: Ablation study of our GPT-4o mini judge, measured in precision (P), recall (R), and F1. We consider how including accessibility trees and screenshots in the input affects the predictions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 205, + 504, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 251 + ], + "type": "text", + "content": "screenshot. This differs from AER, which requires a vision-enabled model, and NNetNav, which requires a long-context model capable of receiving multiple accessibility trees. Our method is compatible with both multimodal and text-only LLMs and does not require a separate LLM to caption the screenshot or summarize changes across observations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 261, + 180, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 180, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 180, + 272 + ], + "type": "text", + "content": "4.2 Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "type": "text", + "content": "To evaluate LLM judges, we use the precision score, which is the ratio of true positives over all predicted positives (true + false positives). 
The metric is a good fit for rejection finetuning (RFT), where we are interested in increasing the number of true positives (actual successful trajectories) while reducing the number of false positives (failed trajectories added to the dataset due to poor LLM judgments). For reward modeling, we also want to prioritize true positives since they are the primary signals for many RL algorithms, while false positives should be minimized to avoid introducing noise to the loss function. Moreover, recall and F1 benefit from minimizing false negatives, which is useful for improving sample efficiency by reducing the number of valid trajectories removed; we report them as auxiliary metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 392, + 217, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 392, + 217, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 392, + 217, + 403 + ], + "type": "text", + "content": "4.3 Judge Performance" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 411, + 504, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 478 + ], + "type": "text", + "content": "In Table 1, we provide an overview of the performance of judges across benchmarks using the metrics defined in Section 4.2. We find that GPT-40 and Claude 3.7 Sonnet-based simplified judges achieve higher precision compared to prior approaches, indicating that removing the internal LLMs for captioning or summarizing changes does not hinder their capabilities. Notably, no judge consistently stands out across benchmarks, highlighting the importance of selecting an appropriate LLM backbone based on the nature of the task." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "text", + "content": "Low precision limits existing judges We notice that no judge achieves above " + }, + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "text", + "content": " precision, which means that " + }, + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 485, + 506, + 574 + ], + "type": "text", + "content": " of trajectories are erroneously marked as successful. This severely limits the usefulness of the judges for downstream applications, such as using the filtered trajectories for finetuning an agent, as the agent will learn to generate incorrect trajectories for a substantial portion of the tasks. This indicates LLM judges are currently not a reliable way of assessing the true capabilities of agents. Consequently, judges will need to achieve higher precision before they are useful for automatic evaluation, which also affects their downstream utility for methods like RFT and RL." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "text", + "content": "Official rule-based evaluation underestimates success Similar to LLM judges, the rule-based evaluation used by benchmarks can be compared with expert annotations. Since they use task-specific configurations to determine success, they may reject successful trajectories due to inconsequential differences. 
For instance, in WebArena, if a user asks \"What's the closest national park to the largest city in Maine?\", the agent may reply: \"The closest national park to Portland [...] is Acadia National Park\". Rule-based evaluation considers it unsuccessful since the configuration requires it to exactly match \"Acadia National Park\". As a result, the rule-based approach achieves a recall of " + }, + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "inline_equation", + "content": "55.9\\%" + }, + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "text", + "content": ", indicating a higher rate of false negatives compared to LLM judges. Overall, a substantial precision gap exists between rule-based methods and LLM judges, but rule-based methods severely underestimate the true performance of web agents, highlighting the need for more flexible automatic evaluation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": "Impact of Input Representation Browser screenshots represent an intuitive state for humans, but LLMs may need more than vision alone, as screenshots miss page structure" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 80, + 499, + 153 + ], + "blocks": [ + { + "bbox": [ + 111, + 80, + 499, + 153 + ], + "lines": [ + { + "bbox": [ + 111, + 80, + 499, + 153 + ], + "spans": [ + { + "bbox": [ + 111, + 80, + 499, + 153 + ], + 
"type": "table", + "html": "
AgentHumanGPT-4o JudgeRule-based
VWAWAWk++VWAWAWk++VWAWAWk++
Claude 3.7 S.28.355.118.434.864.120.723.930.88.1
GPT-4o35.942.318.447.850.011.517.425.64.6
Llama 3.30.022.49.20.027.65.80.018.43.5
Qwen2.5-VL21.733.313.834.852.614.917.429.511.5
", + "image_path": "5632a9bac292c4bee36a707181d78d6ad711c2e7f7fcd6e2c8dde1e3c012afc3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 161, + 504, + 183 + ], + "lines": [ + { + "bbox": [ + 105, + 161, + 504, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 161, + 504, + 183 + ], + "type": "text", + "content": "Table 3: Success Rate of web agents measured by expert annotators, GPT-4o Judge (with accessibility tree) and rule-based evaluation on various benchmarks (§3.2). Results by environment are in Table 6." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 212, + 506, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 212, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 506, + 280 + ], + "type": "text", + "content": "and hidden attributes found in accessibility trees. To investigate the impact of different representations, we ablate our GPT-4o-mini simplified judge in Table 2. We observe that only including screenshots achieves a high precision for success and repetition, whereas only including accessibility trees allows higher recall. Surprisingly, including both accessibility trees and screenshots yields a lower performance than including only the screenshot, indicating that more information distracts rather than assists the judge." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 293, + 369, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 369, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 369, + 308 + ], + "type": "text", + "content": "5 Revisiting how we evaluate task success rate" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 317, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 317, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 506, + 384 + ], + "type": "text", + "content": "One of the core applications of LLM judges is to estimate the success rate on a web navigation benchmark, which is useful in scenarios where there are no dedicated functions to calculate the rule-based success rate, which is the standard evaluation for many web agent benchmarks. However, rule-based approaches may not always agree with experts. In Table 3, we compare the success rate calculated from expert annotations, rule-based evaluation, and a GPT-4o judge with accessibility trees." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "text", + "content": "Rule-based evaluation does not reflect expert-defined success rates We notice a stark difference between the judge and rule-based approach: whereas the LLM judge tends to overestimate the success rate of every agent (with two exceptions in WorkArena++) rule-based methods consistently underestimate it. 
Moreover, the underestimation varies substantially, with the performance of GPT-4o being " + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "inline_equation", + "content": "16.7\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "text", + "content": " lower on WebArena and " + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "inline_equation", + "content": "18.5\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "text", + "content": " lower on VWA compared to expert annotations. This highlights a major discrepancy between the official task success rate reported by rule-based methods and the success rate according to expert annotators. For instance, rule-based evaluation ranks Qwen2.5-VL above GPT-4o on WebArena and WorkArena++ (and equally on VWA), whereas expert annotators prefer GPT-4o over Qwen2.5-VL on all benchmarks, with over " + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 104, + 391, + 506, + 524 + ], + "type": "text", + "content": " higher success rate on VWA. Overall, this stresses the need to develop new methods to calculate task success rate that more precisely reflect expert judgments." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 538, + 205, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 205, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 205, + 552 + ], + "type": "text", + "content": "6 Error Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 561, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 504, + 586 + ], + "type": "text", + "content": "In this section, we qualitatively examine failure cases of LLM judges. Following a GPT-4o judge, we focus on the common error categories to understand how LLM judges fail." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 591, + 506, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 506, + 703 + ], + "type": "text", + "content": "Grounding mismatch When an agent misunderstands what is happening on the screen, its thought process may not reflect the reality of the webpage. In such cases, a judge without access to the screenshots may produce an erroneous judgment due to the agent misunderstanding what is happening on the screen. For example, when a user requests to show \"the product page for the item in the second row, second column,\" the Qwen2.5-VL agent ends up selecting the second item in the first row. However, it writes in its reasoning chain that \"Based on the layout of the page, the second row, second column item is the [energy Drink].\" The judge accepts the agent's thought process without realizing it opened the wrong page: \"The agent's goal was to navigate to the product page for the item in the second row, second column. The agent successfully reached the correct product page.\"" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 734 + ], + "type": "text", + "content": "Misleading agent reasoning The agent may have misleading elements in its reasoning, leading the judge to reason that the agent completed the task correctly. 
In a multi-step" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 80, + 313, + 240 + ], + "blocks": [ + { + "bbox": [ + 111, + 80, + 313, + 240 + ], + "lines": [ + { + "bbox": [ + 111, + 80, + 313, + 240 + ], + "spans": [ + { + "bbox": [ + 111, + 80, + 313, + 240 + ], + "type": "image", + "image_path": "8dca0db1b855d122e7c1547c3afdb6ea23586839146460b25215fd8db956728c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 289 + ], + "type": "text", + "content": "Figure 3: Example of judge error (discussed in Section 6). In this example, the user requests the agent to buy cereal in VisualWebArena, but the agent stops after adding it to the cart. The judge erroneously identified the trajectory as successful, even though the agent missed an important detail in the instruction." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 318, + 82, + 500, + 240 + ], + "blocks": [ + { + "bbox": [ + 318, + 82, + 500, + 240 + ], + "lines": [ + { + "bbox": [ + 318, + 82, + 500, + 240 + ], + "spans": [ + { + "bbox": [ + 318, + 82, + 500, + 240 + ], + "type": "image", + "image_path": "bd9a757f9b7f9157f128559ec0caf9545154edd153e70d195365a1a3307480bb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 319, + 504, + 376 + ], + "type": "text", + "content": "WorkArena++ task, the user requested the agent to apply a search filter to include a unique ID. After several unsuccessful attempts, the agent ended up stating it succeeded in its reasoning chain, even though no filter was applied. The judge was misled by the agent and wrote in its own reasoning chain that \"The agent successfully [...] applied the filter to extract entries...\" before incorrectly indicating that the trajectory was successful." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 381, + 506, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 381, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 506, + 472 + ], + "type": "text", + "content": "Missed instruction details In some cases, the agent does not complete the task completely, missing crucial details from the instruction (see Figure 3). For example, when the user requests to \"buy the cheapest cereal with a graphic character on the box in the Cereals category,\" the agent finds the correct product and informs the customer: \"I've found the cheapest cereal with a graphic character on the box. 
It's Cocoa Puffs, 10.4 Oz Box...\" However, it missed a crucial detail: the user requested that they buy the product. Unfortunately, the judge mistakenly believes that the agent completed a purchase: \"The agent successfully identified and purchased the cheapest cereal with a graphic character on the box...\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 477, + 504, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 504, + 578 + ], + "type": "text", + "content": "Misunderstanding action intents We found that the agent may misuse certain actions, leading to the trajectory to fail very close to completion. In such cases, the LLM judge may incorrectly decide that the trajectory is successful and ignore the misused action. In one instance where the goal was to answer \"What is the minimum travel time by car from Animal Rescue League of Pittsburgh to Schenley park?\", the Qwen2.5-VL agent completes all required actions, but ends up reporting the task as unfeasible instead of replying to the user. The GPT-4o judge (with screenshot) correctly reasons that the travel time was shown on the screen, but does not point out that reporting the task as unfeasible is incorrect, instead asserting that \"all actions were confined to the task of finding the travel time.\"" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": "Overall, current LLM judges are limited by their capability to detect nuanced issues within trajectories, as shown by the judge missing details and misunderstanding an action. Moreover, they will easily agree with the agent's reasoning even when it is wrong, which has been previously observed in LLMs (Sharma et al., 2023). 
Future research should aim to address these issues to improve the performance of LLM judges for evaluating web agents." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 653, + 189, + 665 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 189, + 665 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 189, + 665 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 504, + 733 + ], + "type": "text", + "content": "We introduce AGENTREWARDBENCH, a benchmark for evaluating LLM judges for web agent trajectories. The benchmark consists of over 1300 trajectories, each annotated by experts across three dimensions: whether the agent succeeded, whether it caused unintended side effects, and whether it repeated unnecessary actions. We evaluate 12 LLM judges on AGENTREWARDBENCH and find that simpler input representation can achieve" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 161 + ], + "type": "text", + "content": "higher agreement with expert annotators compared to prior approaches. Moreover, we find that rule-based evaluation, often used by environment-based benchmarks, does not achieve a lower-than-expected agreement with experts. 
Instead, it tends to reject many valid trajectories, which results in the success rate of certain web agents being lower than what an expert would perceive. Overall, we believe our benchmark will help researchers design better LLM judges for web agents trajectories, which will enable the design of automatic evaluators and reward models that better reflect expert judgments." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 175, + 211, + 189 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 211, + 189 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 211, + 189 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 243 + ], + "type": "text", + "content": "Xing Han Lu acknowledges the support of the Natural Sciences and Engineering Research Council of Canada (NSERC) [funding reference no. 579403]. The project is supported by the Google-Mila grant. We thank Alexandre Lacoste, Shikhar Murty, and the McGill NLP group members for helpful discussions." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 257, + 168, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 257, + 168, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 257, + 168, + 270 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 281, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 281, + 506, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 281, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 506, + 326 + ], + "type": "text", + "content": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. URL https://arxiv.org/abs/2303.08774." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 331, + 492, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 331, + 492, + 344 + ], + "spans": [ + { + "bbox": [ + 107, + 331, + 492, + 344 + ], + "type": "text", + "content": "Adept. Act-1: Transformer for actions, 2022. URL https://www.adept.ai/blog/act-1/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 349, + 506, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 349, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 107, + 349, + 506, + 372 + ], + "type": "text", + "content": "Ahmad Al-Dahle. The future of ai: Built with llama, December 2024. URL https://ai.meta.com/blog/future-of-ai-built-with-llama/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 377, + 506, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 377, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 107, + 377, + 506, + 400 + ], + "type": "text", + "content": "Anthropic. 
Introducing computer use, a new claude 3.5 sonnet, and claude 3.5 haiku, 2024a. URL https://www.anthropic.com/news/3-5-models-and-computer-use." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 406, + 506, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 406, + 506, + 429 + ], + "spans": [ + { + "bbox": [ + 107, + 406, + 506, + 429 + ], + "type": "text", + "content": "Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024b. URL https://apisemantic scholar.org/CorpusID:268232499." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 434, + 506, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 434, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 434, + 506, + 491 + ], + "type": "text", + "content": "Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report, 2025. URL https://arxiv.org/abs/2502.13923." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 496, + 504, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 496, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 496, + 504, + 541 + ], + "type": "text", + "content": "Léo Boisvert, Megh Thakkar, Maxime Gasse, Massimo Caccia, Thibault Le Sellier De Chezelles, Quentin Cappart, Nicolas Chapados, Alexandre Lacoste, and Alexandre Drouin. Workarena++: Towards compositional planning and reasoning-based common knowledge work tasks, 2025. URL https://arxiv.org/abs/2407.05291." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 546, + 506, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 546, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 105, + 546, + 506, + 571 + ], + "type": "text", + "content": "Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. Openai gym, 2016. URL https://arxiv.org/abs/1606.01540." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 575, + 506, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 575, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 106, + 575, + 506, + 620 + ], + "type": "text", + "content": "Dongping Chen, Ruoxi Chen, Shilin Zhang, Yinuo Liu, Yaochen Wang, Huichi Zhou, Qihui Zhang, Yao Wan, Pan Zhou, and Lichao Sun. Mllm-as-a-judge: Assessing multimodal llm-as-a-judge with vision-language benchmark, 2024. URL https://arxiv.org/abs/2402.04788." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 625, + 506, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 625, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 625, + 506, + 693 + ], + "type": "text", + "content": "Thibault Le Sellier De Chezelles, Maxime Gasse, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, Sahar Omidi Shayegan, Lawrence Keunho Jang, Xing Han Lu, Ori Yoran, Dehan Kong, Frank F. Xu, Siva Reddy, Quentin Cappart, Graham Neubig, Ruslan Salakhutdinov, Nicolas Chapados, and Alexandre Lacoste. The browsergym ecosystem for web agent research, 2025. URL https://arxiv.org/abs/2412.05467." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 504, + 732 + ], + "type": "text", + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36:28091-28114, 2023." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 505, + 128 + ], + "type": "text", + "content": "Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H. Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, Nicolas Chapados, and Alexandre Lacoste. Workarena: How capable are web agents at solving common knowledge work tasks?, 2024. URL https://arxiv.org/abs/2403.07718." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 135, + 506, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 135, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 106, + 135, + 506, + 170 + ], + "type": "text", + "content": "Aarash Feizi, Sai Rajeswar, Adriana Romero-Soriano, Reihaneh Rabbany, Valentina Zantedeschi, Spandana Gella, and João Monteiro. Pairbench: Are vision-language models reliable at comparing what they see?, 2025. URL https://arxiv.org/abs/2502.15210." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 177, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 177, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 506, + 212 + ], + "type": "text", + "content": "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint, 2024. URL https://arxiv.org/abs/2407.21783." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 219, + 504, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 504, + 244 + ], + "type": "text", + "content": "Izzeddin Gur, Ulrich Rueckert, Aleksandra Faust, and Dilek Hakkani-Tur. Learning to navigate the web. arXiv preprint arXiv:1812.09195, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 251, + 506, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 251, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 106, + 251, + 506, + 285 + ], + "type": "text", + "content": "Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 293, + 506, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 293, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 106, + 293, + 506, + 338 + ], + "type": "text", + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. ArXiv, abs/2401.13919, 2024. URL https://api-semanticscholar.org/CorpusID:267211622." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 346, + 506, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 346, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 346, + 506, + 392 + ], + "type": "text", + "content": "Peter C Humphreys, David Raposo, Toby Pohlen, Gregory Thornton, Rachita Chhaparia, Alistair Muldal, Josh Abramson, Petko Georgiev, Alex Goldin, Adam Santoro, and Timothy Lillicrap. A data-driven approach for learning to control computers, 2022. URL https://arxiv.org/abs/2202.08137." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 399, + 506, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 399, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 106, + 399, + 506, + 435 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 442, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 442, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 442, + 506, + 487 + ], + "type": "text", + "content": "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. 
VisualWebArena: Evaluating Multimodal Agents on Realistic Visual Web Tasks, June 2024. URL http://arxiv.org/abs/2401.13649. arXiv:2401.13649 [cs]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "type": "text", + "content": "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, and Jie Tang. Autowebglm: A large language model-based web navigating agent, 2024. URL https://arxiv.org/abs/2404.03648." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 548, + 506, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 548, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 106, + 548, + 506, + 594 + ], + "type": "text", + "content": "Nathan Lambert, Valentina Pyatkin, Jacob Morrison, LJ Miranda, Bill Yuchen Lin, Khyathi Chandu, Nouha Dziri, Sachin Kumar, Tom Zick, Yejin Choi, Noah A. Smith, and Hannaneh Hajishirzi. Rewardbench: Evaluating reward models for language modeling, 2024. URL https://arxiv.org/abs/2403.13787." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 601, + 506, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 601, + 506, + 658 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 506, + 658 + ], + "type": "text", + "content": "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, Tianlin Shi, and Percy Liang. Reinforcement Learning on Web Interfaces using Workflow-guided Exploration. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings, 2018. URL https://openreview.net/forum?id=ryTp3f-0-." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 666, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 666, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 504, + 690 + ], + "type": "text", + "content": "Xing Han Lu, Zdenek Kasner, and Siva Reddy. Weblinx: Real-world website navigation with multi-turn dialogue. arXiv preprint arXiv:2402.05930, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 697, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 697, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 697, + 506, + 732 + ], + "type": "text", + "content": "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: Bootstrapping Agents by Guiding Exploration with Language, June 2024. URL http://arxiv.org/abs/2403.08140.arXiv:2403.08140 [cs]." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 116 + ], + "type": "text", + "content": "Shikhar Murty, Hao Zhu, Dzmitry Bahdanau, and Christopher D. Manning. Nnetnav: Unsupervised learning of browser agents through environment interaction in the wild, 2025. URL https://arxiv.org/abs/2410.02907." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 122, + 507, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 122, + 507, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 122, + 507, + 159 + ], + "type": "text", + "content": "Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 165, + 505, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 165, + 505, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 165, + 505, + 189 + ], + "type": "text", + "content": "OpenAI. Introducing operator, January 2025. URL https://openai.com/index/introducing-operator." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 196, + 507, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 196, + 507, + 229 + ], + "spans": [ + { + "bbox": [ + 106, + 196, + 507, + 229 + ], + "type": "text", + "content": "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents, 2024. URL https://arxiv.org/abs/2404.06474." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 237, + 507, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 237, + 507, + 272 + ], + "spans": [ + { + "bbox": [ + 106, + 237, + 507, + 272 + ], + "type": "text", + "content": "Ajay Patel, Markus Hofmacher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks, 2024. URL https://arxiv.org/abs/2405.20309." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 279, + 507, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 279, + 507, + 313 + ], + "spans": [ + { + "bbox": [ + 106, + 279, + 507, + 313 + ], + "type": "text", + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents, 2024. URL https://arxiv.org/abs/2408.07199." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "spans": [ + { + "bbox": [ + 106, + 320, + 505, + 355 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 361, + 507, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 361, + 507, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 361, + 507, + 418 + ], + "type": "text", + "content": "Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. Towards understanding sycophancy in language models, 2023. URL https://arxiv.org/abs/2310.13548." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 426, + 507, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 507, + 482 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 507, + 482 + ], + "type": "text", + "content": "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=3PjCt4kmRx." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 489, + 507, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 489, + 507, + 524 + ], + "spans": [ + { + "bbox": [ + 106, + 489, + 507, + 524 + ], + "type": "text", + "content": "Tianlin Shi, Andrej Karpathy, Linxi Fan, Jonathan Hernandez, and Percy Liang. World of bits: An open-domain platform for web-based agents. In International Conference on Machine Learning, pp. 3135-3144. PMLR, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 531, + 507, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 531, + 507, + 576 + ], + "spans": [ + { + "bbox": [ + 106, + 531, + 507, + 576 + ], + "type": "text", + "content": "Robert St. Amant and Luke S. Zettlemoyer. The user interface as an agent environment. In Proceedings of the Fourth International Conference on Autonomous Agents, AGENTS '00, pp. 483-490, New York, NY, USA, 2000. Association for Computing Machinery. ISBN 1581132301. doi: 10.1145/336595.337575. URL https://doi.org/10.1145/336595.337575." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 583, + 507, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 583, + 507, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 583, + 507, + 617 + ], + "type": "text", + "content": "Brandon Trabucco, Gunnar Sigurdsson, Robinson Piramuthu, and Ruslan Salakhutdinov. Towards internet-scale training for agents, 2025. URL https://arxiv.org/abs/2502.06776." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 625, + 507, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 625, + 507, + 659 + ], + "spans": [ + { + "bbox": [ + 106, + 625, + 507, + 659 + ], + "type": "text", + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. WebShop: Towards Scalable Real-world Web Interaction with Grounded Language Agents. In NeurIPS, 2022. URL https://arxiv.org/abs/2207.01206." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 667, + 507, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 667, + 507, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 667, + 507, + 701 + ], + "type": "text", + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks?, 2024. URL https://arxiv.org/abs/2407.15711." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 708, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 708, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 708, + 507, + 732 + ], + "type": "text", + "content": "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(ision) is a generalist web agent, if grounded, 2024. URL https://arxiv.org/abs/2401.01614." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 231 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena, 2023. URL https://arxiv.org/abs/2306.05685." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 133, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 505, + 178 + ], + "type": "text", + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. WebArena: A Realistic Web Environment for Building Autonomous Agents, April 2024. URL http://arxiv.org/abs/2307.13854. arXiv:2307.13854 [cs]." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 185, + 505, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 185, + 505, + 231 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 505, + 231 + ], + "type": "text", + "content": "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, Yangyang Shi, Vikas Chandra, and Jürgen Schmidhuber. Agent-as-a-judge: Evaluate agents with agents, 2024. URL https://arxiv.org/abs/2410.10934." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 192, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 192, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 192, + 94 + ], + "type": "text", + "content": "A Benchmark" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 105, + 308, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 105, + 308, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 105, + 308, + 118 + ], + "type": "text", + "content": "A.1 Environment and Experiments Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 504, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 125, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 504, + 192 + ], + "type": "text", + "content": "AssistantBench Although an unlimited number of websites can be visited, we observed that the agents visited a total of 66 unique domains 
between 1 and 129 times across all trajectories we collected. The number of times a domain was visited can be found in Table 4. Additionally, we replace the default search engine with an alternative search engine (https://duckduckgo.com) as the original homepage blocks browser automation, which renders the tasks unachievable." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 198, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 277 + ], + "type": "text", + "content": "Tasks Subgroups We define the subgroup for WebArena and VisualWebArena as the combination of web domain and evaluation method from the original works. The evaluation methods consist of string matching, HTML-based programs, webpage image querying, and final URL matching. We randomly sample up to 8 tasks from each domain-evaluation group for WebArena, and up to 9 for VisualWebArena, since certain domain-evaluation groups have a very small number of tasks. For WorkArena, we attempt to evenly distribute the task categories. 
As a result, we have the following task distributions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 285, + 504, + 360 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 130, + 285, + 504, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 285, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 285, + 504, + 308 + ], + "type": "text", + "content": "- WebArena: Wikipedia (8), Map (18), Reddit (18), Shopping Admin (18), Shopping (19), Gitlab (19)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 311, + 481, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 311, + 481, + 323 + ], + "spans": [ + { + "bbox": [ + 130, + 311, + 481, + 323 + ], + "type": "text", + "content": "- VisualWebArena: Wikipedia (17), Reddit (27), Classifieds (28), Shopping (28)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 325, + 504, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 325, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 130, + 325, + 504, + 360 + ], + "type": "text", + "content": "- WorkArena: Sophisticated memory (15), Information retrieval (20), Contextual understanding infeasible tasks (21), Planning and problem solving (22), Data driven decision making and reasoning (22)" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 392 + ], + "type": "text", + "content": "Agent Hyperparameters The binary flags used in AgentLab (Chezelles et al., 2025) are shown in Table 5. We set a maximum limit of 40K input tokens and 8192 output tokens." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 399, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 506, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 506, + 498 + ], + "type": "text", + "content": "Agent Platform Implementation In addition to abstracting websites and browser engines into Gym-compatible environments (Brockman et al., 2016), BrowserGym (Drouin et al., 2024; Chezelles et al., 2025) offers advanced preprocessing of complex web inputs (i.e., DOM and accessibility trees) and can automatically parse LLM output and execute them as browser actions like clicks, form inputs, tab actions, etc. Additionally, the BrowserGym ecosystem includes AgentLab, a framework for processing input representation and managing web agent experiments. We use AgentLab to design our representation function " + }, + { + "bbox": [ + 104, + 399, + 506, + 498 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 399, + 506, + 498 + ], + "type": "text", + "content": ", ensuring unified hyperparameters and inputs. As a result, we can avoid unintended differences that may arise from customizing prompts and representations for each LLM." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 510, + 190, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 190, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 190, + 521 + ], + "type": "text", + "content": "A.2 Annotations" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 529, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 631 + ], + "type": "text", + "content": "Trajectory filtering In total, 351 tasks were considered across 5 benchmarks (33 in AssistantBench, 100 in VisualWebArena, 100 in WebArena, 18 in WorkArena, and 100 in WorkArena++) We collect trajectories from agents built from each of three multimodal models: Claude 3.7 Sonnet, GPT-4o, Qwen2.5-VL. Moreover, since Llama 3.3 is not multimodal, we only collect trajectories on 251 tasks (excluding VisualWebArena). Additionally, Llama 3.3 did not complete two WebArena tasks (nos. 735 and 805) due to timeout issues that consistently occurred in the environment, despite multiple attempts to restart. Thus, we obtain a total of 1302 trajectories, where 196 are stored in the development split and 1106 in the test split." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 635, + 504, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 703 + ], + "type": "text", + "content": "Interface To annotate the trajectories, we designed a fully customized annotation interface using Gradio (see Figure 4). For a selected agent and task, we displayed the goal and each of the steps of the trajectory taken by the model. It shows the model's reasoning and action, as well as a screenshot with the action element on overlay. 
Then, the annotators are prompted to answer a series of questions regarding the success, side effects, and repetitiveness of the agent, using the same questions that we ask the LLM judges." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Shared Knowledge Given that the annotators are experts, it is possible that the annotators share knowledge of web agents that non-expert may not possess; we identify several shared" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 216 + ], + "type": "text", + "content": "knowledge facts. (1) web agent design and capabilities: the annotators are aware that the agents are designed with LLMs, some of which have multimodal capabilities, and that they are capable of generating reasoning traces to support actions, and that the LLMs may be subject to hallucination or may product repetitive sequences of text. (2) dedicated web environments: the annotators know that several the websites used in the project come from prior publications in the domain, including WebArena (Zhou et al., 2024), VisualWebArena (Koh et al., 2024), WorkArena (Drouin et al., 2024; Boisvert et al., 2025). 
They are aware that some of the websites are designed specific for the task, whereas others come from real-world websites. (3) Automatic Evaluation: the annotators know that the web environments employ automatic evaluation methods, such as string matching and URL matching, to evaluate the agents. Thus, a task that is successful or unsuccessful may terminate earlier, but the agent will not be guaranteed to receive a positive reward for that task." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 221, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 221, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 106, + 221, + 506, + 365 + ], + "type": "text", + "content": "Annotator agreements and disagreements resolution For most tasks, binary annotations can be produced. However, in some cases, the annotator may not be certain of their annotation, and are allowed to mark a trajectory as uncertain, which was subsequently reviewed by the other annotators. In some cases, annotators may disagree with their judgments. In general, a salient reason for mismatch is the ambiguity of the instructions. For example, a task instruction might mention \"buy a black t-shirt\", but may not specify if it is fully black or can have other graphics. In such cases, annotators are advised to go for the most lenient option. More generally, to ensure that annotators can easily voice their uncertainty and disagreement, the first half of the annotation was conducted in person with all annotators present concurrently. Thus, when an annotator was uncertain about the annotation for a trajectory, they can ask other annotators, who can deliberate about the correct annotation until a consensus is reached. This approach further allows other annotators to align to the consensus for the remaining annotations." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 376, + 189, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 376, + 189, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 376, + 189, + 388 + ], + "type": "text", + "content": "A.3 LLM Judges" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 396, + 506, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 431 + ], + "type": "text", + "content": "**Prompts** We use simple system prompt (Figure 5) and user message (Figure 6) templates without model-specific commands, allowing our prompt to be transferred to any LLM. We use distinct tags, such as and , to facilitate parsing the model output." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 436, + 506, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 506, + 471 + ], + "type": "text", + "content": "Results We report extended results for 10 LLM judges, with the overall results in Table 7 and the finegrained results in Table 8 over all agents; the unaggregated results are presented in Tables 9 to 12." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 87, + 504, + 281 + ], + "blocks": [ + { + "bbox": [ + 106, + 87, + 504, + 281 + ], + "lines": [ + { + "bbox": [ + 106, + 87, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 106, + 87, + 504, + 281 + ], + "type": "image", + "image_path": "67479fd205ea8a95ae38b87aa49014397d48430a2207fb34bad96a8b193cd8f6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 106, + 281, + 310, + 462 + ], + "blocks": [ + { + "bbox": [ + 106, + 281, + 310, + 462 + ], + "lines": [ + { + "bbox": [ + 106, + 281, + 310, + 462 + ], + "spans": [ + { + "bbox": [ + 106, + 281, + 310, + 462 + ], + "type": "image", + "image_path": "b4d93f4c536918e29a5020c6118dc73a1ced2f509d0c0f5cdc73963e81050241.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 469, + 457, + 483 + ], + "lines": [ + { + "bbox": [ + 153, + 469, + 457, + 483 + ], + "spans": [ + { + "bbox": [ + 153, + 469, + 457, + 483 + ], + "type": "text", + "content": "Figure 4: User Interface used by annotators for answering questions" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 316, + 281, + 504, + 462 + ], + "blocks": [ + { + "bbox": [ + 316, + 281, + 504, + 462 + ], + "lines": [ + { + "bbox": [ + 316, + 281, + 504, + 462 + ], + "spans": [ + { + "bbox": [ + 316, + 281, + 504, + 462 + ], + "type": "image", + "image_path": 
"3cfbbd1665745f18b8e63dd716304db7e7b8a9a40ac4b8cb12767659dbd37df2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 121, + 505, + 489, + 700 + ], + "blocks": [ + { + "bbox": [ + 121, + 505, + 489, + 700 + ], + "lines": [ + { + "bbox": [ + 121, + 505, + 489, + 700 + ], + "spans": [ + { + "bbox": [ + 121, + 505, + 489, + 700 + ], + "type": "table", + "html": "
Domain#Domain#Domain#
duckduckgo.com129google.com112wizards.com24
blackbaudhosting.com21fedex.com17mtggoldfish.com17
usps.com12fidelity.ca12weather.gov10
yelp.com9linkedin.com9rottentomatoes.com9
nih.gov8tcgplayer.com8imdb.com8
yahoo.com7cagreatamerica.com7thedrinknation.com6
tripadvisor.com6express.dhl6californiagreatamerica.com5
seattlechildrensmuseum.org5monday.com5fubo.tv5
philamuseum.org5weatherspark.com5bing.com5
ensembl.org4wellhub.com4hubbioo.com3
wholefoodsmarket.com3alltrails.com3target.com2
andersmartialarts.com2wikipedia.org2sfyimby.com2
currentresults.com2stockanalysis.com2speakrj.com2
x.com2apple.com2extremeweatherwatch.com2
tmplclubs.com2sixflags.com1etf.com1
amazon.com1netflixreleases.com1weather-and-climate.com1
wunderground.com1redfin.com1talesofamountainmama.com1
themeparkcenter.com1seattleweatherblog.com1chromewebdata1
peacefoodnyc.com1sec.gov1calicolabs.com1
easyship.com1onlineshippingcalculator.com1tripadvisor.ca1
nyunews.com1fandango.com1aimobo.io1
anytots.com1morningstar.com1visitphilly.com1
", + "image_path": "79bcf03814eb760d5fc8e0eb52317dc44bb68f0d0eb22fcbf1d8db118ab2505d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 212, + 710, + 398, + 720 + ], + "lines": [ + { + "bbox": [ + 212, + 710, + 398, + 720 + ], + "spans": [ + { + "bbox": [ + 212, + 710, + 398, + 720 + ], + "type": "text", + "content": "Table 4: AssistantBench Website Visit Counts" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 121, + 89, + 489, + 163 + ], + "blocks": [ + { + "bbox": [ + 121, + 89, + 489, + 163 + ], + "lines": [ + { + "bbox": [ + 121, + 89, + 489, + 163 + ], + "spans": [ + { + "bbox": [ + 121, + 89, + 489, + 163 + ], + "type": "table", + "html": "
ValueFlags
Trueuseifax, use_som, use-thinking, use_concrete_example, use_ABSTRACT_example, use_hints, be_cautious
Falseuse_html, usepast_error_logs, use Think_history, use_diff, filter Visible_elements_only, long_description, individual/examples, use_plan, use_criticise, use_memory, enable chatting
", + "image_path": "7f48a6d6dffbee0c753c2cd9ee9598cf1c13ea70fb6596793215d0a51a81fe4d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 138, + 206, + 473, + 483 + ], + "blocks": [ + { + "bbox": [ + 225, + 171, + 385, + 184 + ], + "lines": [ + { + "bbox": [ + 225, + 171, + 385, + 184 + ], + "spans": [ + { + "bbox": [ + 225, + 171, + 385, + 184 + ], + "type": "text", + "content": "Table 5: Agentlab Hyperparameters" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 206, + 473, + 483 + ], + "lines": [ + { + "bbox": [ + 138, + 206, + 473, + 483 + ], + "spans": [ + { + "bbox": [ + 138, + 206, + 473, + 483 + ], + "type": "table", + "html": "
BenchmarkAgentExpertLLM JudgeRule-based
AssistantBenchClaude 3.7 S.11.111.10.8
GPT-4o14.814.83.7
Llama 3.33.77.45.3
Qwen2.5-VL0.00.02.2
WebArenaClaude 3.7 S.55.164.130.8
GPT-4o42.350.025.6
Llama 3.322.427.618.4
Qwen2.5-VL33.352.629.5
VisualWebArenaClaude 3.7 S.28.334.823.9
GPT-4o35.947.817.4
Qwen2.5-VL21.734.817.4
WorkArenaClaude 3.7 S.68.868.850.0
GPT-4o50.056.250.0
Llama 3.356.250.056.2
Qwen2.5-VL56.256.256.2
WorkArena++Claude 3.7 S.18.420.78.1
GPT-4o18.411.54.6
Llama 3.39.25.83.5
Qwen2.5-VL13.814.911.5
OverallClaude 3.7 S.33.038.020.4
GPT-4o31.335.316.3
Llama 3.317.017.513.3
Qwen2.5-VL22.331.719.5
", + "image_path": "3fcd6d6c6c53ef5d655d6e0ff13f9bfff714b04e8a95435c83bca1a6b39cc8ae.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 121, + 527, + 491, + 690 + ], + "blocks": [ + { + "bbox": [ + 105, + 492, + 504, + 504 + ], + "lines": [ + { + "bbox": [ + 105, + 492, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 504, + 504 + ], + "type": "text", + "content": "Table 6: Success Rate by evaluation type. For the LLM judge, we use GPT-4o with accessibility trees." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 121, + 527, + 491, + 690 + ], + "lines": [ + { + "bbox": [ + 121, + 527, + 491, + 690 + ], + "spans": [ + { + "bbox": [ + 121, + 527, + 491, + 690 + ], + "type": "table", + "html": "
JudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AER-C67.771.969.7------
AER-V67.671.569.5------
Claude 3.7 S. (A)68.881.674.714.034.720.082.894.988.4
Claude 3.7 S. (S)69.476.372.714.144.421.482.094.587.8
Functional83.855.967.1------
GPT-4o (A)69.883.175.97.791.714.280.496.987.9
GPT-4o (S)68.180.373.77.590.313.879.296.286.9
GPT-4o Mini (A)61.586.171.77.270.813.078.646.458.3
GPT-4o Mini (S)64.578.370.86.631.911.092.318.530.8
Llama 3.3 (A)67.779.072.96.979.212.780.191.685.5
NNetNav52.582.464.1------
Qwen2.5-VL (A)64.389.875.09.055.615.488.172.679.6
Qwen2.5-VL (S)64.586.173.78.858.315.288.764.674.7
", + "image_path": "3b3b523d52ea3d449263831d809c2bca3078fa57ca50ab960a8cb6a967fc27a2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 721 + ], + "type": "text", + "content": "Table 7: Results over all benchmarks by judge. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 102, + 495, + 668 + ], + "blocks": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "lines": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "type": "table", + "html": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "image_path": "55cf8ed31386a867855e3097e6c2a90acd7ed00a9c2e4344bc24ab3f9d7afdf9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "type": "text", + "content": "Table 8: Finegrained results by benchmark and judge for all agents. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 101, + 496, + 670 + ], + "blocks": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "lines": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "type": "table", + "html": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "image_path": "77007d4343c7aab2936fe569092fef7ca49b6622819e1c83f346774f57a1305e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "type": "text", + "content": "Table 9: Finegrained results by benchmark and judge for Qwen2.5-VL agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 102, + 495, + 668 + ], + "blocks": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "lines": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "type": "table", + "html": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "image_path": "7f2421a723f50fa8df7200781d075cb0c69a98bd3470aca8dc2f648844c87aab.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 504, + 709 + ], + "type": "text", + "content": "Table 10: Finegrained results by benchmark and judge for Llama 3.3 agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 101, + 496, + 670 + ], + "blocks": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "lines": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "spans": [ + { + "bbox": [ + 115, + 101, + 496, + 670 + ], + "type": "table", + "html": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "image_path": "3638d2893524c1d751349fe73edd5030d584b750e19926704cc9ef9bb7bf8899.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 686, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 105, + 686, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 504, + 709 + ], + "type": "text", + "content": "Table 11: Finegrained results by benchmark and judge for Claude 3.7 Sonnet agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 115, + 102, + 495, + 668 + ], + "blocks": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "lines": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "spans": [ + { + "bbox": [ + 115, + 102, + 495, + 668 + ], + "type": "table", + "html": "
BenchmarkJudgeSuccessSide EffectRepetition
PRF1PRF1PRF1
AssistantBenchAER-C83.362.571.4------
AER-V83.362.571.4------
Claude 3.7 S. (A)87.587.587.57.133.311.870.194.080.3
Claude 3.7 S. (S)71.462.566.77.733.312.570.892.080.0
Functional25.012.516.7------
GPT-4o (A)77.887.582.43.2100.06.368.696.080.0
GPT-4o (S)77.887.582.43.2100.06.367.194.078.3
GPT-4o Mini (A)80.0100.088.92.766.75.259.332.041.6
GPT-4o Mini (S)80.0100.088.93.433.36.390.018.030.0
Llama 3.3 (A)75.075.075.03.4100.06.665.896.078.0
NNNetNav20.862.531.2------
Qwen2.5-VL (A)72.7100.084.20.00.00.073.862.067.4
Qwen2.5-VL (S)70.087.577.85.766.710.572.252.060.5
VisualWebArenaAER-C56.070.962.6------
AER-V61.279.769.2------
Claude 3.7 S. (A)61.077.268.216.735.722.778.796.886.8
Claude 3.7 S. (S)64.874.769.417.142.924.577.897.686.6
Functional85.258.269.2------
GPT-4o (A)63.086.172.712.489.321.875.298.485.2
GPT-4o (S)60.782.369.912.492.921.972.799.283.9
GPT-4o Mini (A)57.983.568.410.764.318.474.446.056.9
GPT-4o Mini (S)57.473.464.411.235.717.187.121.434.4
Llama 3.3 (A)59.674.766.312.082.121.074.492.182.3
NNNetNav54.569.661.1------
Qwen2.5-VL (A)59.388.671.117.271.427.886.973.879.8
Qwen2.5-VL (S)58.587.370.015.464.324.883.359.569.4
WebArenaAER-C68.883.275.3------
AER-V67.684.074.9------
Claude 3.7 S. (A)69.389.177.911.828.616.784.988.686.7
Claude 3.7 S. (S)69.389.177.914.838.121.380.885.182.9
Functional79.053.864.0------
GPT-4o (A)70.289.178.59.690.517.482.993.988.1
GPT-4o (S)69.989.978.77.976.214.382.792.187.1
GPT-4o Mini (A)63.590.874.710.171.417.770.339.550.6
GPT-4o Mini (S)66.986.675.57.528.611.986.711.420.1
Llama 3.3 (A)68.286.676.38.676.215.479.786.082.7
NNNetNav54.390.867.9------
Qwen2.5-VL (A)63.694.175.96.728.610.984.763.272.4
Qwen2.5-VL (S)62.992.474.89.438.115.190.550.064.4
WorkArenaAER-C100.081.189.6------
AER-V96.473.083.1------
Claude 3.7 S. (A)85.091.988.38.350.014.376.086.480.8
Claude 3.7 S. (S)85.378.481.70.00.00.077.377.377.3
Functional100.091.995.8------
GPT-4o (A)94.694.694.62.650.05.070.486.477.5
GPT-4o (S)93.881.187.05.3100.010.072.081.876.6
GPT-4o Mini (A)84.286.585.33.350.06.266.736.447.1
GPT-4o Mini (S)90.375.782.40.00.00.0100.018.230.8
Llama 3.3 (A)94.389.291.70.00.00.081.881.881.8
NNNetNav77.391.983.9------
Qwen2.5-VL (A)87.291.989.50.00.00.0100.059.174.3
Qwen2.5-VL (S)93.881.187.00.00.00.086.759.170.3
WorkArena++AER-C66.742.351.8------
AER-V59.330.840.5------
Claude 3.7 S. (A)66.762.764.717.138.923.787.597.492.2
Claude 3.7 S. (S)66.750.057.113.661.122.287.398.992.8
Functional83.338.552.6------
GPT-4o (A)63.055.859.25.5100.010.385.698.591.6
GPT-4o (S)59.653.856.65.5100.010.484.598.290.8
GPT-4o Mini (A)49.476.960.15.283.39.787.852.966.1
GPT-4o Mini (S)54.865.459.64.633.38.196.520.233.4
Llama 3.3 (A)62.761.562.14.783.38.986.793.890.1
NNNetNav43.278.855.8------
Qwen2.5-VL (A)60.378.868.37.577.813.791.979.085.0
Qwen2.5-VL (S)64.473.168.56.477.811.893.275.783.6
", + "image_path": "4863bb5ba625a6f6e63f09af52fe2c622bd6c22434726514333cd02a82329430.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 686, + 504, + 708 + ], + "lines": [ + { + "bbox": [ + 105, + 686, + 504, + 708 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 504, + 708 + ], + "type": "text", + "content": "Table 12: Finegrained results by benchmark and judge for GPT-4o agent. We report the precision (P) as the primary metric, and F1 and recall (R) as the auxiliary metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 87, + 224, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 87, + 224, + 99 + ], + "spans": [ + { + "bbox": [ + 121, + 87, + 224, + 99 + ], + "type": "text", + "content": "System Prompt Template" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 108, + 488, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 108, + 488, + 169 + ], + "spans": [ + { + "bbox": [ + 121, + 108, + 488, + 169 + ], + "type": "text", + "content": "You are a Language Model specialized in judging the performance of web agents in web navigation tasks. For a certain website, you are given the goal of a navigation task, the current URL of the webpage, the actions taken by the agent, and the thought process of the agent. Additionally, you will have access to the first and last screenshots, as well as the last accessibility tree. 
Your task is to answer several questions about the agent's performance in the task." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 178, + 478, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 178, + 478, + 198 + ], + "spans": [ + { + "bbox": [ + 121, + 178, + 478, + 198 + ], + "type": "text", + "content": "Question 1: Was the sequence of actions below successful in achieving the goal? \nChoices: Successful, Unsuccessful" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 208, + 488, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 208, + 488, + 238 + ], + "spans": [ + { + "bbox": [ + 121, + 208, + 488, + 238 + ], + "type": "text", + "content": "Question 2: Did the agent perform unnecessary actions that could lead to unintended side effects? \nChoices: Yes, No" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 247, + 488, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 247, + 488, + 269 + ], + "spans": [ + { + "bbox": [ + 121, + 247, + 488, + 269 + ], + "type": "text", + "content": "Question 3: Did the agent perform the task optimally, by only performing necessary actions and avoiding unnecessary ones?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 277, + 487, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 277, + 487, + 298 + ], + "spans": [ + { + "bbox": [ + 121, + 277, + 487, + 298 + ], + "type": "text", + "content": "Choices: 1. Complete Failure, 2. Suboptimal, 3. Somewhat Optimal, 4. Completely Optimal" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 308, + 474, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 308, + 474, + 338 + ], + "spans": [ + { + "bbox": [ + 121, + 308, + 474, + 338 + ], + "type": "text", + "content": "Question 4: Did the agent loop through a sequence of actions that did not make progress towards the goal? 
\nChoices: Yes, No" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 347, + 487, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 347, + 487, + 377 + ], + "spans": [ + { + "bbox": [ + 121, + 347, + 487, + 377 + ], + "type": "text", + "content": "You should provide your reasoning process for each question, and you can refer to the screenshots and the accessibility tree to support your answers. Your answer must follow the following format:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 387, + 312, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 387, + 312, + 437 + ], + "spans": [ + { + "bbox": [ + 121, + 387, + 312, + 437 + ], + "type": "text", + "content": "your reasoning here answer answer answer answer" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 174, + 460, + 436, + 473 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 460, + 436, + 473 + ], + "spans": [ + { + "bbox": [ + 174, + 460, + 436, + 473 + ], + "type": "text", + "content": "Figure 5: System Prompt Template used for the simplified judge" + } + ] + } + ], + "index": 9, + "type": "text" + }, + { + "bbox": [ + 121, + 488, + 224, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 488, + 224, + 502 + ], + "spans": [ + { + "bbox": [ + 121, + 488, + 224, + 502 + ], + "type": "text", + "content": "User Prompt Template" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 121, + 510, + 489, + 692 + ], + "blocks": [ + { + "bbox": [ + 121, + 510, + 489, + 692 + ], + "lines": [ + { + "bbox": [ + 121, + 510, + 489, + 692 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 489, + 692 + ], + "type": "text", + "content": "The user goal is: {goal} \nThe agent performed the following actions: \nStep: {step_number} \nURL: {url} \nAction: {action} \nReasoning: {reasoning} \n----- \nThe last accessibility tree is: \n{axtree} \nHere is the screenshot of the last step. 
\n{screenshot} \nProvide your reasoning and answer the four questions from the system prompt, using \nthe specified format." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 179, + 715, + 430, + 727 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 715, + 430, + 727 + ], + "spans": [ + { + "bbox": [ + 179, + 715, + 430, + 727 + ], + "type": "text", + "content": "Figure 6: User Prompt Template used for the simplified judge" + } + ] + } + ], + "index": 12, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_content_list.json b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..449c5b631f5427ec3809580b2a6249574cb427df --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_content_list.json @@ -0,0 +1,3458 @@ +[ + { + "type": "text", + "text": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models", + "text_level": 1, + "bbox": [ + 133, + 128, + 867, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mohamed Dhouib", + "bbox": [ + 220, + 204, + 367, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LIX, École Polytechnique, IP Paris, France \nmohamed.dhouib@polytechnique.edu", + "bbox": [ + 124, + 220, + 465, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + 
"text": "Sonia Vanier", + "bbox": [ + 243, + 266, + 349, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LIX, École Polytechnique, IP Paris, France \nsonia.vanier@polytechnique.edu", + "bbox": [ + 125, + 282, + 467, + 319 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Davide Buscaldi", + "bbox": [ + 620, + 204, + 754, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LIPN, Université Sorbonne Paris Nord, France davide.buscaldi@lipn.univ-paris13.fr", + "bbox": [ + 504, + 222, + 872, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aymen Shabou", + "bbox": [ + 627, + 266, + 750, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "DataLab Groupe, Crédit Agricole S.A, France", + "bbox": [ + 506, + 284, + 870, + 300 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "aymen.shabou@credit-agricole-sa.fr", + "bbox": [ + 535, + 304, + 836, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 354, + 326, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Visual Language Models require substantial computational resources for inference due to the additional input tokens needed to represent visual information. However, these visual tokens often contain redundant and unimportant information, resulting in an unnecessarily high number of tokens. To address this, we introduce PACT, a method that reduces inference time and memory usage by pruning irrelevant tokens and merging visually redundant ones at an early layer of the language model. Our approach uses a novel importance metric to identify unimportant tokens without relying on attention scores, making it compatible with FlashAttention. We also propose a novel clustering algorithm, called Distance Bounded Density Peak Clustering, which efficiently clusters visual tokens while constraining the distances between elements within a cluster by a predefined threshold. 
We demonstrate the effectiveness of PACT through extensive experiments.", + "bbox": [ + 89, + 386, + 485, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 676, + 220, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Extending Large language models to modalities other than text [11, 18, 19, 55, 56] has seen success in recent years across various domains, especially in the visual domain with models like LLaVA [31] and Qwen-VL [4]. State-of-the-art Visual Language Models generally consist of three main components: a vision encoder, a connector, and a language model. The vision encoder converts input images into visual tokens, which are passed through the connector and then fed to the language model along with the input text. While this architecture has shown impressive performance across different tasks, it suffers from high computational cost due to the large number of visual tokens. In this paper, we introduce two complementary methods to", + "bbox": [ + 89, + 703, + 485, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "optimize Visual Language Models by reducing inference time and memory requirements: a pruning module and a clustering algorithm. These methods can be used independently or combined, forming the PACT approach for greater effectiveness. Notably, our pruning and clustering modules, as well as PACT, are applied at inference time and thus require no additional training. The pruning module identifies unimportant visual tokens based on a novel importance metric that evaluates each token's relevance without relying on attention scores. This makes it compatible with FlashAttention [12], as FlashAttention does not support the calculation of attention scores. 
The second module introduces a novel clustering algorithm, Distance Bounded Density Peak Clustering (DBDPC), which clusters visual tokens while ensuring that the distances between elements within a cluster are constrained by a predefined threshold. By combining these two methods, we develop PACT. First, the pruning module eliminates unimportant tokens, then the DBDPC algorithm clusters the remaining ones. Tokens that were initially pruned but are sufficiently close to the constructed clusters are reincorporated, ensuring that valuable information from the pruned tokens is recovered. Finally, the tokens within each cluster are merged into a single representative token, reducing the total token count.", + "bbox": [ + 511, + 354, + 906, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "By combining both pruning and clustering, PACT achieves an effective visual token reduction, addressing both irrelevant and redundant tokens. When applied to LLaVA-OneVision-7B, PACT achieves a $50\\%$ visual token reduction with negligible performance loss. Moreover, PACT exhibits significantly less performance degradation at higher reduction ratios compared to previous methods, achieving $71.3\\%$ visual token reduction ratio with only $1.4\\%$ performance drop, whereas previous state-of-the-art methods show at best a $4.4\\%$ performance drop at an equal reduction ratio. 
Our contributions are as follows:", + "bbox": [ + 511, + 717, + 908, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- We propose a novel visual token pruning metric that does", + "bbox": [ + 511, + 885, + 906, + 901 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.08966v1 [cs.CV] 11 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "not rely on attention scores, ensuring compatibility with FlashAttention, and empirically validate its effectiveness.", + "bbox": [ + 102, + 90, + 480, + 119 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a new clustering algorithm aimed at reducing visual redundancy and show its superiority over other clustering algorithms for visual token reduction.", + "- We show that combining pruning with clustering-based merging surpasses either technique alone for visual token reduction. By integrating our pruning and clustering algorithms, we propose a novel approach, PACT, and demonstrate that it outperforms previous and concurrent works [3, 6, 9, 30, 44]. The codebase used to obtain the results in this study is available at https://github.com/orailix/PACT/tree/main." + ], + "bbox": [ + 91, + 121, + 482, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 89, + 303, + 228, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Visual language models", + "text_level": 1, + "bbox": [ + 89, + 329, + 307, + 345 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Since the introduction of BLIP-2 [28], the use of a visual encoder followed by a connector that feeds visual vectors to the language model has become the standard architecture for Visual Language Models (VLMs) [7, 17, 50]. 
Recent models [10, 27, 49] have enhanced VLM architecture with high-resolution handling, which is necessary for document understanding tasks [13, 23]. LLaVA-OneVision [27] divides images into $384 \\times 384$ crops, encodes each part with SigLIP [54], and uses bilinear interpolation to reduce token count up to 8,748 tokens. InternVL2 [10] splits images into $448 \\times 448$ tiles, processing up to 40 tiles per image with InternViT [10], and applies pixel shuffle to reduce the number of visual tokens, producing up to 10,240 tokens. Qwen-VL2 [49] uses 2D Rotary Positional Embeddings for dynamic resolution support and merges adjacent tokens via an MLP layer, yet still requires over 10,000 tokens for high resolution images. While these models apply token reduction by merging adjacent tokens to preserve structure, they do not address token irrelevance or redundancy, limiting efficiency.", + "bbox": [ + 89, + 351, + 482, + 640 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Visual token reduction", + "text_level": 1, + "bbox": [ + 89, + 651, + 302, + 666 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reducing the number of visual tokens in Vision Transformers (ViT) has been a key focus of the research community for several years. EViT [29] identifies and merges irrelevant tokens by relying on the attention scores between the class token ([CLS]) and visual tokens. ToME [6] proposed a simple yet effective approach that iteratively merges similar tokens throughout the ViT layers. Building on these ideas, recent efforts have extended visual token reduction techniques to VLMs. LaVIT [21] used the Gumbel-Softmax [20] to train a mask that selects tokens for retention, merging discarded tokens into retained ones via additional attention layers. 
LLaVA-PruMerge [44] accelerates LLAVA 1.5 [31] by leveraging the attention scores between the [CLS] token and visual tokens in the last layer of the ViT encoder to decide which tokens to retain, while HiRED [3] refines", + "bbox": [ + 89, + 674, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "this approach by allocating token budgets based on attention from earlier layers. However, both these methods are only applicable to architectures where a ViT is used and a [CLS] token is added to the input visual sequence, making them incompatible with the majority state-of-the-art VLMs, which do not use a [CLS] token. Moreover, both methods attribute scores to tokens at the output of the visual encoder, but recent VLMs merge adjacent visual tokens before passing them to the language model. It is unclear how to attribute pre-merging scores to the resulting tokens, making LLaVA-PruMerge and HiRED inapplicable. We note that LLaVA-PruMerge mitigates information loss by merging pruned tokens with retained ones. However, it does not merge similar retained tokens; therefore, it does not address visual redundancy, a typical limitation of pruning-based methods. TRIM [46] prunes tokens based on similarity with pooled text from CLIP [42]. However, as TRIM relies on textual information for pruning, it is less suitable for multi-turn conversations where, in practice, visual tokens would be pruned solely based on the text information available during the image's forward pass, potentially losing crucial information required to answer subsequent prompts. FastV [9] evaluates token importance via average attention scores, which is not compatible with FlashAttention, adding computational overhead for recent VLMs. VTW [30] removes tokens in deeper layers. While this method shows promising results, its reduction of computational costs is limited as visual tokens are only withdrawn in later layers. 
These previous methods address only one of two issues: the presence of unimportant tokens or visual redundancy. In this work, we introduce PACT, a novel approach that tackles both issues simultaneously by pruning irrelevant tokens and merging visually redundant ones.", + "bbox": [ + 511, + 90, + 906, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 513, + 603, + 604, + 618 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we present PACT, a method that aims to reduce VLMs inference time and memory usage by pruning unimportant tokens and merging visually redundant ones at an early layer $L$ of the language model. PACT consists of three steps: First, unimportant tokens are identified. Next, the remaining tokens are clustered. Finally, tokens in each cluster, along with sufficiently close tokens that were initially discarded, are merged. PACT operates within a selected layer $L$ of the language model and is applicable in scenarios where visual tokens are fed into the language model, regardless of the architecture of the visual encoder or connector. The three-step process of PACT is illustrated in Figure 1. We denote the hidden states at layer $L$ by $\\mathbf{H} \\in \\mathbb{R}^{n \\times d}$ , where $n$ is the number of visual tokens and $d$ is the dimensionality of the hidden states. We denote by $\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}$ the key and query matrices for the visual tokens at layer $L$ , where $n_h$ represents the number of attention heads and $d_h$ is the dimensionality of each attention heads.", + "bbox": [ + 511, + 628, + 908, + 900 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg", + "image_caption": [ + "Figure 1. 
Simplified illustration of PACT. This figure illustrates the three-step process of PACT: (1) First, EUTI is used to prune visual tokens deemed unimportant; (2) Then, DBDPC is applied to cluster the remaining tokens, ensuring that the distance between each token and its corresponding cluster center is smaller than the cutoff distance; (3) Finally, initially pruned tokens that are close to cluster centers are reintegrated, and the elements within each cluster are merged to form the reduced set of visual tokens." + ], + "image_footnote": [], + "bbox": [ + 133, + 88, + 864, + 285 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Algorithm 1 EUTI", + "text_level": 1, + "bbox": [ + 91, + 376, + 220, + 391 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Input: Hidden states $\\mathbf{H} \\in \\mathbb{R}^{n \\times d}$ ; key and query matrices", + "bbox": [ + 91, + 397, + 480, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}$ ; pruning percentage $\\lambda \\in [0,1]$", + "bbox": [ + 107, + 412, + 439, + 428 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Output: Sets of important and unimportant visual tokens", + "bbox": [ + 91, + 429, + 472, + 443 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 1: Calculate the global query vector", + "bbox": [ + 107, + 444, + 395, + 458 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 458, + 251, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 2: Compute the importance score for each visual token", + "bbox": [ + 107, + 474, + 482, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for all $i = 1,\\dots ,n$ do", + "bbox": [ + 107, + 503, + 259, + 518 + ], + "page_idx": 2 + }, + { + "type": "equation", + 
"text": "\n$$\ns _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\left\\| \\mathbf {h} _ {i} \\right\\| _ {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 518, + 447, + 537 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 107, + 537, + 161, + 549 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 3: Define sets of important and unimportant tokens", + "bbox": [ + 107, + 551, + 482, + 579 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 107, + 580, + 359, + 595 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {u n i m p o r t a n t}} = \\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\}\n$$\n", + "text_format": "latex", + "bbox": [ + 109, + 595, + 370, + 612 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Return $S_{\\text{important}}$ , $S_{\\text{unimportant}}$", + "bbox": [ + 107, + 612, + 297, + 628 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tion head. For simplicity, we omit the layer index in the notation. We denote the position index of a token by a subscript, while the attention head is indicated by a superscript. For instance, $\\mathbf{k}_i^{(j)}$ represents the key vector corresponding to the $i$ -th visual token and the $j$ -th attention head.", + "bbox": [ + 89, + 662, + 482, + 739 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. 
Unimportant tokens identification", + "text_level": 1, + "bbox": [ + 89, + 755, + 387, + 771 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A straightforward approach to identifying unimportant tokens at a certain layer $L$ of the used language model is to define the importance of each token as the total attention score that a given token receives from all other tokens [9]. However, this method has three main drawbacks. First, current VLMs utilize FlashAttention [12], which does not support outputting attention scores. Secondly, attention scores are computed with masking, which introduces", + "bbox": [ + 89, + 780, + 482, + 900 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "biases. Tokens at the end of a sequence tend to receive lower average attention scores since fewer tokens attend to them. Calculating the average attention score for each token based solely on the tokens that attend to it can mitigate this masking effect but introduces a new bias: end-of-sequence tokens may exhibit higher scores as they receive attention mainly from nearby tokens. This leads to either earlier or later tokens being pruned more frequently, as shown in Fig. 2. Such positional bias should be avoided, as pruning should depend solely on the information that visual tokens hold, not their position. Finally, relying only on keys and queries at a single layer to determine an importance metric may fail to fully capture the significance of visual tokens across all layers of the language model, mainly because each self-attention layer focuses on different aspects of the visual tokens. To address this, we propose an importance metric that incorporates the accumulated in", + "bbox": [ + 511, + 377, + 906, + 633 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg", + "image_caption": [ + "(a) Average attention scores as a function of Position IDs." 
+ ], + "image_footnote": [], + "bbox": [ + 524, + 665, + 689, + 744 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg", + "image_caption": [ + "(b) Average attention scores relative to non-masked tokens as a function of Position IDs.", + "Figure 2. Illustration of the bias induced by the use of the average attention scores across visual tokens as a pruning metric. In (a), averaging attention over all tokens favors earlier tokens, leading to pruning later tokens more frequently. In (b), averaging only over attending tokens reverses the bias, leading to earlier tokens being pruned more often." + ], + "image_footnote": [], + "bbox": [ + 723, + 666, + 885, + 744 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg", + "image_caption": [ + "Figure 3. Illustration of visual token norm statistics at the fourth layer of LLaVA-OneVision-7B." + ], + "image_footnote": [], + "bbox": [ + 106, + 90, + 467, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "formation from the hidden states and the layer-specific information from the keys and queries at an early layer $L$ . We refer to this method as Efficient Unimportant Tokens Identification (EUTI). We speculate that the norm of hidden states can provide critical information about the importance of each visual token, as they reflect how much information a particular token carries through the network. Figure 3 presents statistics on the hidden state norms of visual tokens at the fourth layer of LLaVA-OneVision-7B, indicating a high variance. This variance suggests that certain visual tokens accumulate more information through residual connections and may therefore be more important for subsequent calculations. 
To leverage information from both hidden state norms and the key and query vectors, we first compute a global query vector $\\mathbf{Q}_{\\mathrm{global}}$ as the average of all query vectors across visual tokens:", + "bbox": [ + 88, + 321, + 483, + 565 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 571, + 482, + 611 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This vector represents the overall query information requested by visual tokens at layer $L$ across all attention", + "bbox": [ + 89, + 618, + 482, + 648 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/66838683c6d4d826816b766acbd7ffa79eb6b730ce42f44926fc5a77a836db69.jpg", + "image_caption": [ + "Figure 4. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-OneVision-7B before the application of rotary embeddings." + ], + "image_footnote": [], + "bbox": [ + 107, + 669, + 467, + 840 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "heads. The importance score for each visual token is then computed by first taking the dot product between its key and the global query for each attention head. A softmax is applied across visual tokens within each attention head, followed by averaging across attention heads. 
The final score is obtained by scaling the result with the hidden state norm:", + "bbox": [ + 511, + 90, + 905, + 181 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ns _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\| \\mathbf {h} _ {i} \\| _ {2} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 557, + 200, + 905, + 244 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Then, we divide the visual tokens into important and unimportant tokens, using a parameter $\\lambda \\in [0,1]$ to control the percentage of tokens deemed unimportant. The two sets are defined as follows:", + "bbox": [ + 511, + 250, + 905, + 309 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 319, + 906, + 338 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {u n i m p o r t a n t}} = \\left\\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 576, + 343, + 905, + 361 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unimportant tokens can be pruned, or the resulting sets can be combined with a clustering algorithm to further reduce the number of visual tokens, as we will show in the next section. The full EUTI algorithm is illustrated in Algorithm 1. We note that in the case where Rotary Embeddings are used [47], we use the keys and queries before their application to avoid any positional bias.", + "bbox": [ + 511, + 364, + 905, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. 
Clustering-based merging of visual tokens", + "text_level": 1, + "bbox": [ + 511, + 478, + 872, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Distance Bounded Density Peak Clustering Relying solely on the importance scores presented above to prune unimportant tokens can lead to a significant reduction in visual tokens, retaining only important ones. However, redundant information may still be present across retained visual tokens. Therefore, we propose merging the redundant visual tokens using a clustering algorithm. We desire our clustering algorithm to have the following characteristics:", + "bbox": [ + 511, + 500, + 905, + 621 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) Low computational time.", + "(b) Avoid assigning points that are far from each other, in terms of feature similarity, into the same cluster." + ], + "bbox": [ + 511, + 623, + 903, + 667 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8cef1152cfc8f46979a833a4e4fbf5773ece6fb604250f4f1f1cd8d86b2b607f.jpg", + "table_caption": [ + "Table 1. Throughput ratio, reduction ratio, and GPU memory usage for PACT, FastV, VTW, and ToME applied to LLaVA-OneVision-7B. Results are reported at a $98.6\\%$ Approach-to-Reference Metric Ratio." + ], + "table_footnote": [], + "table_body": "
No reductionPACT (ours)FastVVTWToME
Reduction Ratio0%71.3%50%25%40%
LLM Throughput Ratio100%225%165%160%137%
GPU Maximum Memory Consumption (GB)27.419.0530.419.221.4
", + "bbox": [ + 514, + 750, + 905, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Condition (b) ensures that outliers are not assigned to distant cluster centers, as we speculate that these outliers contain important information and should only be merged with nearby outliers or remain as single points in separate clusters. Condition (b) also guarantees that points in each cluster will be relatively close to each other, which minimizes", + "bbox": [ + 511, + 809, + 905, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "information loss when assigning a single vector as their representative. The Density Peaks Clustering (DPC) algorithm [5] is appealing in this context because it satisfies condition (a), unlike iterative clustering algorithms like k-means [2]. However, DPC does not satisfy condition (b) as it can form large clusters where boundary points may be distant from each other. The same issue arises with other algorithms such as DBSCAN [14]. Therefore, we propose a new clustering algorithm, which we call Distance Bounded Density Peaks Clustering (DBDPC).", + "bbox": [ + 89, + 90, + 483, + 241 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "DBDPC takes as input a set of vectors $\\{\\mathbf{u}_i\\in \\mathbb{R}^{d_1}\\}_{i = 1}^q$ where $q,d_{1}\\in \\mathbb{N}^{+}$ , and outputs a set of clusters. Our algorithm's output depends on two parameters, the cutoff distance $d_c\\in \\mathbb{R}^+$ and a normalization factor $d_{n}\\in \\mathbb{R}^{+}$ , as well as a distance function $d:\\mathbb{R}^{d_1}\\times \\mathbb{R}^{d_1}\\to \\mathbb{R}^+$ . 
We define the distance between two vectors $\\mathbf{u}_i$ and $\\mathbf{u}_j$ as:", + "bbox": [ + 89, + 242, + 483, + 335 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd _ {i j} = d \\left(\\mathbf {u} _ {i}, \\mathbf {u} _ {j}\\right) = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ {2} \\| \\mathbf {u} _ {j} \\| _ {2}} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 165, + 345, + 483, + 377 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Then the local density $\\rho_{i}$ is calculated as:", + "bbox": [ + 89, + 388, + 367, + 402 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {i} = \\sum_ {j} e ^ {- d _ {i j} / d _ {n}} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 416, + 482, + 449 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We process the $\\mathbf{u}_i$ vectors from highest to lowest $\\rho$ values and designate a vector as a cluster center if its minimum distance from already selected centers is greater than $d_c$ . Each vector $\\mathbf{u}_i$ is then assigned to the cluster of the closest center. Our algorithm guarantees that the distance from each vector to its cluster center is less than $d_c$ , thereby satisfying condition (b) stated above. The full DBDPC algorithm is detailed in Algorithm 2. The center identification process in DBDPC ensures that inter-cluster distances are upper-bounded by $2d_c \\times (2 - d_c)$ while distances between cluster centers are lower-bounded by $d_c$ , which we formally prove in Appendix B. We note that several parts of our algorithm are presented as for-loops for clarity. However, all computations are parallelizable on GPU, as there are no dependencies between the elements of each loop, except for the part where we select cluster centers. 
For this part, we use a recursive algorithm that efficiently identifies an initial set of centers and discarded vectors, thereby reducing the number of vectors to be processed. We explain this in detail in Appendix D. For a comparison between DBDPC and DPC, as well as a qualitative comparison with other clustering algorithms, refer to Appendix C.", + "bbox": [ + 89, + 460, + 483, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Which vectors should be used for distance calculation?", + "bbox": [ + 89, + 795, + 482, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As previously discussed, the DBDPC algorithm operates on a set of vectors that are used for distance calculation. To achieve effective clustering, the dot product between these vectors needs to accurately reflect the similarity between the corresponding visual tokens. Fortunately, transformers address this issue through the QKV self-attention mechanism.", + "bbox": [ + 89, + 810, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 2 DBDPC", + "text_level": 1, + "bbox": [ + 514, + 90, + 663, + 104 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Input: Cutoff distance $d_{c} \\in \\mathbb{R}^{+}$ , normalization factor $d_{n} \\in \\mathbb{R}^{+}$ , set of vectors $\\{\\mathbf{u}_i \\in \\mathbb{R}^{d_1}\\}_{i=1}^q$", + "bbox": [ + 513, + 111, + 906, + 142 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Output: Cluster center indices $C_{\\text{centers}}$ , element indices in each cluster $C_{\\text{elements}}$", + "bbox": [ + 513, + 142, + 905, + 171 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for all pairs $(\\mathbf{u}_i,\\mathbf{u}_j)$ do", + "bbox": [ + 531, + 172, + 691, + 186 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nd _ {i j} = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ {2} \\| \\mathbf {u} _ {j} \\| _ {2}}\n$$\n", + "text_format": 
"latex", + "bbox": [ + 555, + 186, + 697, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 531, + 205, + 584, + 215 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for all vectors $\\mathbf{u}_i$ do", + "bbox": [ + 531, + 219, + 669, + 233 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {i} = \\sum_ {j = 1} ^ {q} e ^ {- d _ {i j} / d _ {n}}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 233, + 692, + 251 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 531, + 251, + 584, + 261 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Sort vectors by $\\rho_{i}$ in descending order, obtaining indices $[i_1,i_2,\\dots ,i_q]$", + "bbox": [ + 531, + 263, + 903, + 294 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Initialize $C_{\\mathrm{centers}} = \\{i_1\\}$ $C_{\\mathrm{elements}} = \\{i_1:\\emptyset \\}$", + "bbox": [ + 531, + 294, + 823, + 309 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for all indices $i_k$ in sorted order do", + "bbox": [ + 531, + 310, + 764, + 323 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "if $\\min_{s\\in C_{\\mathrm{centers}}}d_{i_ks} > d_c$ then", + "bbox": [ + 555, + 325, + 751, + 340 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {c e n t e r s}} = C _ {\\text {c e n t e r s}} \\cup \\left\\{i _ {k} \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 340, + 741, + 354 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {e l e m e n t s}} \\left[ i _ {k} \\right] = \\emptyset\n$$\n", + "text_format": "latex", + "bbox": [ + 581, + 354, + 687, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end if", + "bbox": [ + 555, + 371, + 599, + 382 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 531, + 386, + 584, + 397 + ], + "page_idx": 4 + }, + { + "type": 
"text", + "text": "for all indices $i$ do", + "bbox": [ + 531, + 400, + 656, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$s_i = \\text{argmin}_{s \\in C_{\\text{centers}}} d_{is}$", + "bbox": [ + 555, + 416, + 715, + 430 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {e l e m e n t s}} [ s _ {i} ] = C _ {\\text {e l e m e n t s}} [ s _ {i} ] \\cup \\{i \\}\n$$\n", + "text_format": "latex", + "bbox": [ + 555, + 430, + 769, + 446 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 531, + 446, + 584, + 457 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Return $C_{\\mathrm{centers}}$ $C_{\\mathrm{elements}}$", + "bbox": [ + 531, + 460, + 692, + 474 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, the key vectors $K$ provide a meaningful representation of each token, tailored for dot product similarity. Therefore, we will use the key vectors in the DBDPC algorithm. Formally, we have:", + "bbox": [ + 511, + 506, + 906, + 566 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {c e n t e r s}}, C _ {\\text {e l e m e n t s}} = \\mathrm {D B D P C} \\left(K ^ {\\prime}\\right) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 598, + 578, + 906, + 595 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $K' = \\{\\mathbf{u}_i \\in K \\mid i \\in S_{\\text{important}}\\}$ is the subset of keys consisting of elements with indices in $S_{\\text{important}}$ .", + "bbox": [ + 511, + 607, + 905, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "What about unimportant tokens near cluster centers? Tokens initially deemed unimportant but close enough to cluster centers have a high probability of being mislabeled. We add these tokens to the corresponding cluster to limit information loss. 
Formally, we define a threshold based on a coefficient $\\alpha$ , where any token $\\mathbf{u}_i$ , initially excluded, is added to the cluster of the closest center $s \\in C_{\\text{centers}}$ if its distance to the center satisfies $d_{is} < \\alpha \\cdot d_c$ . Specifically, the new cluster elements set $C_{\\text{elements}}^{(s)}$ is updated as follows:", + "bbox": [ + 511, + 638, + 905, + 777 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {a d d e d}} ^ {(s)} = \\left\\{i \\in S _ {\\text {u n i m p o r t a n t}} \\mid s = \\operatorname {a r g m i n} _ {s ^ {\\prime} \\in C _ {\\text {c e n t e r s}}} d _ {i s ^ {\\prime}} \\right. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 526, + 789, + 903, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and $d_{is} < \\alpha \\cdot d_c\\}$", + "bbox": [ + 710, + 811, + 831, + 827 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {e l e m e n t s}} ^ {(s)} \\leftarrow C _ {\\text {e l e m e n t s}} ^ {(s)} \\cup S _ {\\text {a d d e d}} ^ {(s)} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 843, + 903, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Merging Finally, the hidden states corresponding to the elements in each cluster are merged. 
Formally, the merged", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 3 PACT", + "text_level": 1, + "bbox": [ + 91, + 90, + 223, + 104 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Input: Hidden states $\\mathbf{H} = [\\mathbf{h}_1, \\dots, \\mathbf{h}_n] \\in \\mathbb{R}^{n \\times d}$ ; key and query matrices $\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}$ ; position IDs $\\mathbf{P} = [p_1, \\dots, p_n]$ ; pruning percentage $\\lambda \\in [0, 1]$ ; cutoff distance $d_c > 0$ ; tolerance coefficient $\\alpha > 0$", + "Output: Merged hidden states $\\mathbf{H}'$ ; new position IDs $\\mathbf{P}'$", + "Step 1: Identify important and unimportant tokens", + "$S_{\\mathrm{important}}$ $S_{\\mathrm{unimportant}}\\gets \\mathrm{EUTI}(\\mathbf{H},\\mathbf{K},\\mathbf{Q},p)$", + "Step 2: Cluster important tokens with DBDPC", + "$\\mathbf{K}^{\\prime}\\gets \\{\\mathbf{k}_{i}\\in \\mathbf{K}\\mid i\\in S_{\\mathrm{important}}\\}$", + "$C_{\\mathrm{centers}}$ $C_{\\mathrm{elements}}\\gets \\mathrm{DBDPC}(\\mathbf{K}^{\\prime},d_{c})$", + "Step 3: Assign unimportant tokens to sufficiently close clusters.", + "for all $i\\in S_{\\mathrm{unimportant}}$ do", + "$s_i\\gets argmin_s d_{is}$", + "if $d_{isi} < \\alpha .d_c$ then", + "$C_{\\mathrm{elements}}^{(s_i)} \\gets C_{\\mathrm{elements}}^{(s_i)} \\cup \\{i\\}$", + "end if", + "end for", + "Step 4: Merge hidden states and assign position IDs", + "for all $s\\in C_{\\mathrm{centers}}$ do", + "$\\mathbf{h}_s^{\\prime}\\gets \\frac{1}{|C_{\\mathrm{elements}}^{(s)}|}\\sum_{i\\in C_{\\mathrm{elements}}^{(s)}}\\mathbf{h}_i$", + "$p_s^\\prime \\gets p_s$", + "end for", + "Return H', P'" + ], + "bbox": [ + 91, + 111, + 483, + 481 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "hidden 
states are computed as:", + "bbox": [ + 89, + 513, + 294, + 527 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {H} ^ {\\prime} = \\left\\{\\frac {1}{| C _ {\\text {e l e m e n t s}} ^ {(j)} |} \\sum_ {i \\in C _ {\\text {e l e m e n t s}} ^ {(j)}} \\mathbf {h} _ {i} \\mid C _ {\\text {e l e m e n t s}} ^ {(j)} \\in C _ {\\text {e l e m e n t s}} \\right\\} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 104, + 537, + 482, + 589 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Defining the position IDs Accurately assigning position IDs to each vector in the new hidden states $\\mathbf{H}^{\\prime}$ is crucial, especially for models using Rotary embeddings, as these IDs determine the input image structure or the temporal dependencies of the input video. In order to achieve a low statistical discrepancy compared to regular inference, we assign the position ID for each vector from $H^{\\prime}$ as its corresponding cluster center. The full PACT pipeline is shown in Algorithm 3. When Rotary Embeddings are used, DBDPC uses the keys after these embeddings are applied, whereas EUTI uses the keys and queries before applying these embeddings. For clarity, we omit this detail in Algorithm 3. We also note that both DBDPC and EUTI, as well as PACT, do not use textual tokens. Therefore, visual token reduction is performed independently of the textual context, making our method well-suited for multi-turn conversations.", + "bbox": [ + 89, + 598, + 483, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Proportional attention Merging tokens reduces their influence in the attention mechanism and can therefore deteriorate performance if many important tokens are merged together. 
To mitigate this, we employ proportional attention.", + "bbox": [ + 89, + 839, + 483, + 901 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg", + "image_caption": [ + "Figure 5. Comparison between PACT, DBDPC, and EUTI against other visual token reduction methods across various reduction ratios applied on LLaVA-OneVision-7B." + ], + "image_footnote": [], + "bbox": [ + 516, + 88, + 709, + 212 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 712, + 89, + 903, + 212 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Let $K$ , $Q$ , and $V$ denote the keys, queries, and values at a layer $L'$ , where $L' \\geq L$ . For each attention head $j$ , the attention scores are calculated as follows:", + "bbox": [ + 511, + 306, + 905, + 351 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nA ^ {(j)} = \\operatorname {s o f t m a x} \\left(\\frac {Q ^ {(j)} K ^ {(j) \\top}}{\\sqrt {d _ {l ^ {\\prime}}}} + \\log \\mathbf {W} + \\mathbf {B}\\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 542, + 358, + 905, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $d_{l'}$ is the dimensionality of the query for each attention head. Here, $\\mathbf{W}$ is a matrix representing the weight of each token, and $\\mathbf{B}$ is the attention mask. Specifically, for visual tokens, $w_{i_0,i_1}$ represents the size of the cluster corresponding to token $i_1$ , for any value of $i_0$ . For each textual token at position $t$ , $w_{i_0,t} = 1$ , as they remain unmerged, retaining a weight of one. By scaling the attention scores based on $\\mathbf{W}$ , the model effectively treats each visual token as if it represents multiple tokens. 
We note that when using proportional attention, we use PyTorch's scaled dot-product attention, which produces similar results to the official FlashAttention implementation while supporting custom masks.", + "bbox": [ + 511, + 402, + 906, + 598 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Selecting the layer $L$ for token reduction: To ensure maximum computational gain, we must choose an early layer $L$ for visual token reduction. However, we also require that the keys at the selected layer are not too similar, allowing for effective clustering and pruning. Thus, we select the earliest layer where the maximum distance between keys is sufficiently high. Figure 4 shows that in the initial layers of LLaVA-OneVision-7B, the keys corresponding to visual tokens are quite similar, indicating a lack of distinctive features necessary for effective pruning and clustering.", + "bbox": [ + 511, + 599, + 905, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Experiments", + "text_level": 1, + "bbox": [ + 511, + 763, + 645, + 779 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Evaluation datasets", + "text_level": 1, + "bbox": [ + 511, + 787, + 700, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate the effectiveness of PACT using diverse benchmarks, similar to those used for LLaVA-OneVision-7B, covering single-image, multi-image, and video tasks. We use AI2D [22], TextVQA [45], ChartQA [37], DocVQA [38], and InfographicVQA [39] to assess PACT's ability to reduce visual tokens while maintaining performance", + "bbox": [ + 511, + 809, + 905, + 901 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 503, + 936 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg", + "image_caption": [ + "Figure 6. 
Comparison between PACT and other visual token reduction methods across various reduction ratios applied on Qwen2-VL-7B-Instruct." + ], + "image_footnote": [], + "bbox": [ + 93, + 88, + 480, + 212 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in text-rich documents. To test reasoning across multiple disciplines, we use MME [15], MMBench [32], MMVet [51], MathVerse [57], MathVista [34], MMMU [53], MMStar [8], and ScienceQA [33]. Additionally, Vibe-Eval [40], MM-LiveBench [26], and LLaVA-Bench-Wilder [25] evaluate its robustness in real-world scenarios and visual chat contexts. We use LLaVA-Interleave Bench [25] and MuirBench [48] to examine PACT's efficiency in token reduction while preserving inter-image reasoning. To assess performance in video comprehension tasks, we use ActivityNet-QA [52], MLVU [58], VideoMME [16], EgoSchema [36], and PerceptionTest [41]. Finally, Video-ChatGPT [35] evaluates the method's effectiveness in dialogue-based video interaction.", + "bbox": [ + 88, + 291, + 482, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Evaluation setup", + "text_level": 1, + "bbox": [ + 89, + 512, + 256, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In our comparison, we include approaches where the reduction is applied at a single layer, similar to PACT, such as FastV and clustering-based visual token reduction. For these approaches, we refer to the reduction ratio as the relative reduction in the number of visual tokens, defined as $1 - \\frac{\\text{number of visual tokens after reduction}}{\\text{number of visual tokens before reduction}}$ . For all these approaches, we use the same value of $L$ and vary hyperparameters to test across different reduction ratios. 
For methods that use progressive token reduction, like ToME [6], or apply reduction after the visual encoder, as PruMerge and HiReD, or when the reduction ratio cannot be controlled at a fixed", + "bbox": [ + 89, + 534, + 483, + 700 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg", + "image_caption": [ + "Figure 7. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on InternVL2-8B." + ], + "image_footnote": [], + "bbox": [ + 93, + 719, + 480, + 843 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "layer, such as VTW, we adjust the parameters of these approaches to achieve the same average number of visual tokens across all layers as the one-layer reduction methods for a given reduction ratio. When evaluating clustering algorithms for visual token reduction, we apply proportional attention, as it consistently improves performance across all clustering algorithms, especially at high reduction ratios. Additionally, it is crucial to correctly assign position IDs to the resulting reduced set of visual tokens. Details on the assignment strategy are presented in Appendix E. When reporting processing time or throughput, we take into account the total time required by both the language model and the reduction algorithm per input element. In the next section, we base our comparison on a metric called the Approach-to-Reference Metric Ratio, defined as the average of the ratio of the metric of the tested approach to the metric obtained without visual token reduction across all test datasets. Formally we have Approach-to-Reference Metric Ratio $= \\frac{1}{N} \\sum_{i=1}^{N} \\frac{\\text{Metric with reduction}(i)}{\\text{Metric no reduction}(i)}$ where $N$ is the total number of test datasets. This metric indicates how much of the original model capacity is retained. 
It is important to note that when using ToME for visual token reduction, a reduction ratio greater than 50% can't be achieved if the number of visual tokens is reduced by a fixed amount in each layer, as suggested in [6]. Instead, we use a scheduler to achieve higher reduction ratios, which we explain in Appendix F. More details on the hyperparameters used for evaluating PACT are provided in Appendix G. We follow the same dataset splits and metrics used for evaluating LLaVA-OneVision wherever feasible. More details are provided in Appendix H. Note that all experiments were conducted on a single A100 GPU.", + "bbox": [ + 511, + 90, + 906, + 577 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Results", + "text_level": 1, + "bbox": [ + 513, + 585, + 607, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We compare PACT with FastV [9], VTW [30], ToME [6], PruMerge [44] and HiRED [3] on LLaVA-OneVision7B, InternVL2-8B, Qwen2-VL-7B-Instruct and LLaVA1.6-Mistral-7B. Since HiRED and PruMerge are only applicable to LLaVA-1.6, we exclude them from other comparisons. As shown in figures 5, 6, 7, and 8 PACT con", + "bbox": [ + 511, + 606, + 905, + 699 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg", + "image_caption": [ + "Figure 8. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on LLaVA-1.6-Mistral-7B." + ], + "image_footnote": [], + "bbox": [ + 516, + 719, + 903, + 844 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6781bd4dacc8d2c302580b8fb7ca94d0f5aaeb7217ecb00a2d70b937c7b1cd3b.jpg", + "table_caption": [ + "Table 2. Comparison of PACT with FastV, VTW, and ToME on LLaVA-OneVision-7B. Algo. 
Time refers to the average time the algorithm takes per input element, measured in seconds. Proc. Time refers to the average time taken by both the language model and the reduction algorithm per input element. Red. Ratio stands for average Reduction Ratio. The Algo. Time for VTW is nearly zero, and thus omitted. The different visual token reduction methods are evaluated at the same reduction ratio as PACT." + ], + "table_footnote": [], + "table_body": "
DatasetNo reductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeMetricProc. TimeAlgo. Time
VideoMME58.50.79257.665.6%0.3690.02157.00.3710.04046.90.29657.00.4170.091
MME15790.5541564.070.2%0.2430.0171576.00.2440.016842.00.2311556.90.3170.084
DocVQA87.21.08884.467.9%0.5190.02684.30.5240.05110.50.44961.90.5760.099
MLVU65.20.79564.766.4%0.3610.02262.90.3690.04054.40.31263.40.4170.092
LLaVA-Interleave64.10.24964.069.7%0.1330.01058.90.1390.00732.40.12350.30.1920.068
ChartQA79.90.67176.568.5%0.3410.01977.00.3420.01616.60.30763.40.4020.082
MMBench80.60.24980.369.3%0.1350.01079.00.1400.00552.40.12579.70.1930.066
MuirBench42.00.38443.167.8%0.1780.01340.40.1780.00934.90.16240.50.2330.072
ScienceQA95.90.23893.869.6%0.1330.01091.60.1370.00680.00.12493.80.1900.066
MMMU49.20.13948.970.4%0.1040.00748.90.1060.00343.50.09348.60.1240.062
AI2D81.50.38281.069.8%0.1860.01379.40.1910.01469.70.17779.70.2440.073
InfographicVQA66.00.89561.964.7%0.4810.02358.60.4830.04024.50.40848.30.6070.130
MMStar62.00.29760.169.7%0.1470.01158.60.1520.00737.20.16560.10.2290.069
ActivityNetQA54.50.92155.170.0%0.4190.02953.70.4250.04236.60.39454.10.5130.203
MM-LiveBench73.14.43471.767.5%3.2120.04764.43.2210.04441.03.08064.23.6070.102
LLaVA-Wilder71.010.1071.570.0%8.2620.03571.08.2630.02548.87.51568.07.9260.085
MathVerse16.80.83116.674.2%0.3610.02116.10.3820.03617.60.30116.50.5590.150
MathVista63.30.44062.070.7%0.2710.01559.50.2720.01638.50.26055.00.3380.071
MMVet58.04.60258.470.4%3.7930.03551.73.7950.03615.73.65247.24.1150.212
Vibe-Eval41.65.15339.171.1%3.7090.03238.23.7140.04712.33.55031.24.3170.095
VideoChatGPT3.252.9723.2567.2%1.8630.0293.221.8660.0401.921.3203.191.9750.205
EgoSchema60.10.81160.166.6%0.3510.02158.70.3530.04444.80.29759.80.3910.091
PerceptionTest52.10.80152.366.9%0.3530.02351.70.3570.04045.00.29651.10.3930.090
TextVQA75.80.69075.067.2%0.3320.02375.50.3360.02911.60.28762.50.3920.087
", + "bbox": [ + 91, + 155, + 906, + 410 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "siently outperforms other methods at both equal reduction ratios and equal throughput across all four models. VTW experiences a significant performance drop for reduction ratios above $40\\%$ , indicating that removing all visual tokens is only effective when done in later layers. FastV and ToME struggle at high reduction ratios, while PruMerge and HiRED exhibit degradation even at low reduction ratios. Meanwhile, PACT maintains acceptable performance even at high reduction ratios. Table 2 and Table 3 shows that PACT outperforms other approaches on most of the test datasets when applied on LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct. The same conclusion applies to other models, with detailed results provided in Appendix I. In Tab. 1, we report the reduction ratio, throughput, and maximum GPU memory consumption of the different approaches at an equal Approach-to-Reference Metric Ra", + "bbox": [ + 88, + 434, + 480, + 676 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/bbee2722cb3ddcd6e6492ed26fea8d5f65ff30ed2c01704da099312ba5eed042.jpg", + "image_caption": [ + "Figure 9. Comparison of DBDPC and other clustering algorithms for visual token reduction at different reduction ratios on LLaVA-OneVision-7B." + ], + "image_footnote": [], + "bbox": [ + 94, + 705, + 285, + 825 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/5d0471b07592f17764f244fb83f43851f0af23eb77d5e45eb7c28eaf67db9494.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 705, + 480, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "tio of $98.6\\%$ on LLaVA-OneVision-7B. PACT significantly outperforms the other methods, achieving a reduction ratio of $71.3\\%$ , a GPU memory reduction of $31\\%$ , and a $225\\%$ speedup in the language model's inference time. 
The per-dataset results used to compute these metrics are shown in Tab. 5. Tab. 1 also indicates that when using FastV, the maximum GPU memory consumption is relatively high due to the costly computation of attention scores. We further compare DBDPC against agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14], with results presented in Fig. 9. The graphs reveal that DBDPC consistently outperforms other clustering algorithms for visual token reduction, exhibiting less performance degradation at equal reduction ratios and demonstrating improved computational efficiency, leading to better throughput. These results validate our hypothesis that, for an effective visual token reduction, it is necessary to ensure that the distances between elements within each cluster do not exceed a predefined threshold. Fig. 5 also shows that EUTI consistently outperforms FastV at equal reduction ratios and is less costly, as it does not require the computation of attention scores. In addition, unlike FastV, EUTI does not introduce a GPU memory overhead1. We provide additional numerical results in Appendix I.", + "bbox": [ + 511, + 434, + 906, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4. Ablation study", + "text_level": 1, + "bbox": [ + 511, + 808, + 661, + 824 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 5 shows that PACT consistently outperforms both DBDPC and EUTI across various reduction ratios. 
This confirms that combining clustering and pruning techniques", + "bbox": [ + 511, + 830, + 906, + 876 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "$^{1}$ EUTI achieves roughly the same memory reduction as PACT.", + "bbox": [ + 531, + 886, + 862, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/ac03a20585c83df2d07c8acc08addbb8f9c595884eeff0f8891abb84395e3e2b.jpg", + "table_caption": [ + "Table 3. Comparison of PACT with FastV, VTW, and ToME applied on Qwen2-VL-7B-Instruct across Various Datasets." + ], + "table_footnote": [], + "table_body": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1654.50.2381666.586.3%0.1101500.00.111709.240.1201610.90.140
DocVQA93.90.51690.577.5%0.29486.60.2988.50.24942.90.350
TextVQA81.80.15580.467.5%0.13279.90.13513.20.11866.20.151
InfographicVQA74.60.47870.669.7%0.27863.30.27321.50.22543.90.299
ChartQA80.80.14576.061.1%0.13569.20.13412.90.12355.10.155
MMBench77.60.07477.151.5%0.07777.10.07476.90.07375.90.080
MuirBench40.70.15941.276.9%0.11340.40.11237.90.11175.80.125
MMMU51.40.10951.272.6%0.09349.30.09245.40.08848.90.105
AI2D79.90.10578.464.2%0.09676.20.09769.00.08776.40.115
MMStar56.00.07254.861.3%0.07251.50.06740.30.06553.80.077
EgoSchema62.10.36061.660.0%0.20760.20.21246.30.19061.20.230
MathVerse25.30.62024.582.2%0.39323.70.39613.90.29618.10.651
MathVista59.20.24957.773.3%0.19556.40.19436.80.16553.50.275
MM Vet24.94.70025.180.3%3.82022.33.8302.73.65016.74.780
Vibe-Eval47.53.20046.185.0%2.31044.32.37513.11.99329.63.620
LLaVA-Interleave35.90.12035.573.7%0.10034.70.10133.20.09635.30.125
MM-LiveBench72.63.97070.777.1%3.04063.03.12039.72.97057.64.450
", + "bbox": [ + 94, + 114, + 906, + 354 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "yields better performance than using each approach independently, as the combined method addresses both visual tokens irrelevance and redundancy. We ablate several components of the DBDPC algorithm and present the results in Fig. 10. First, we ablate token merging by selecting the center of each cluster as the representative token instead of merging tokens within each cluster. We also ablate the use of proportional attention. Additionally, we ablate the assignment of position IDs to the reduced set of tokens and experiment with two alternatives: using the mean of position IDs of all elements in each cluster and assigning position IDs sequentially after reordering the reduced set according to the mean of position IDs. Finally, we ablate the use of key vectors in the clustering process and instead use hidden states. Our results show that each ablated component contributes positively to the performance of the DBDPC algorithm. Notably, correctly assigning position IDs to the reduced set is crucial, as these position IDs reflect the structure of input images and the temporal order of input videos. Additionally, proportional attention proves effective at higher reduction ratios, while token merging en", + "bbox": [ + 89, + 378, + 485, + 696 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg", + "image_caption": [ + "Figure 10. Ablation study of DBDPC and EUTI on LLaVA-OneVision-7B." 
+ ], + "image_footnote": [], + "bbox": [ + 94, + 733, + 285, + 857 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 289, + 734, + 480, + 856 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "hances performance once the reduction ratio exceeds $50\\%$ . The figure also confirms that keys are better suited for cosine similarity-based distance calculations, as they are naturally used in dot products within the attention mechanism. We perform two separate ablations on Eq. (2) of the EUTI algorithm. The first ablation removes the use of hidden state norms, while the second ablates the use of the global query, which corresponds to using only the hidden state norms. The results in Fig. 10 show that combining both the global query-based score and the norm of hidden states consistently leads to better results than using either metric alone, suggesting that they provide complementary information about the importance of each visual token. Finally, we ablate the pruned token recovery module in PACT by setting $\\alpha$ to zero, with results presented in Fig. 11. The plot shows that reintegrating visual tokens initially deemed unimportant but close enough to a cluster center consistently enhances performance across different reduction ratios, supporting our hypothesis that these tokens were likely mislabeled by the EUTI module. Figure 11 also shows the effect of the choice of the reduction layer on PACT's performance, demonstrating the effectiveness of our reduction layer identification approach. We provide additional numerical results in Appendix J.", + "bbox": [ + 511, + 378, + 908, + 741 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg", + "image_caption": [ + "Figure 11. Ablation study of PACT on LLaVA-OneVision-7B." 
+ ], + "image_footnote": [], + "bbox": [ + 516, + 760, + 692, + 869 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 760, + 903, + 869 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 925, + 504, + 935 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 91, + 89, + 209, + 104 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we presented PACT, a method that addresses both visual token irrelevance and redundancy. PACT is a plug-and-play solution that does not require additional training. It does not rely on textual tokens for visual token reduction, making it well-suited for multi-turn conversations. Additionally, it operates independently of the visual encoder and connector architecture, making it broadly applicable across various Visual Language Models. Our results confirm that the number of visual tokens in Visual Language Models is unnecessarily large and provide valuable insights for effective token reduction. This opens the door for future work in designing more efficient connectors and architectures for VLMs.", + "bbox": [ + 89, + 114, + 483, + 310 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6. Acknowledgments", + "text_level": 1, + "bbox": [ + 89, + 325, + 267, + 343 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This work received financial support from Crédit Agricole S.A. through the research chair with Ecole Polytechnique on Trustworthy and Responsible AI. 
This work was granted access to the HPC resources of IDRIS under the allocation 2024-AD011014793R1 made by GENCI.", + "bbox": [ + 89, + 349, + 483, + 434 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 449, + 187, + 465 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Marcel R Ackermann, Johannes Blömer, Daniel Kuntze, and Christian Sohler. Analysis of agglomerative clustering. *Algorithmica*, 69:184-215, 2014. 8, 3", + "[2] Mohiuddin Ahmed, Raihan Seraj, and Syed Mohammed Shamsul Islam. The k-means algorithm: A comprehensive survey and performance evaluation. Electronics, 9(8):1295, 2020. 5, 8, 3", + "[3] Kazi Hasan Ibn Arif, JinYi Yoon, Dimitrios S Nikolopoulos, Hans Vandierendonck, Deepu John, and Bo Ji. Hired: Attention-guided token dropping for efficient inference of high-resolution vision-language models in resource-constrained environments. arXiv preprint arXiv:2408.10945, 2024. 2, 7", + "[4] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1", + "[5] Panthadeep Bhattacharjee and Pinaki Mitra. A survey of density based clustering algorithms. Frontiers of Computer Science, 15:1-27, 2021. 5, 8, 3", + "[6] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. arXiv preprint arXiv:2210.09461, 2022. 2, 7, 4", + "[7] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 
2", + "[8] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao," + ], + "bbox": [ + 99, + 473, + 483, + 901 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 7", + "[9] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. arXiv preprint arXiv:2403.06764, 2024. 2, 3, 7", + "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhang-wei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2", + "[11] Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023. 1", + "[12] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness, 2022. 1, 3", + "[13] Mohamed Dhouib, Ghassen Bettaieb, and Aymen Shabou. Docparser: End-to-endOCR-free information extraction from visually rich documents. In International Conference on Document Analysis and Recognition, pages 155-172. Springer, 2023. 2", + "[14] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 5, 8, 3", + "[15] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. 
Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 7",
+ "[16] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 7",
+ "[17] Tao Gong, Chengqi Lyu, Shilong Zhang, Yudong Wang, Miao Zheng, Qian Zhao, Kuikun Liu, Wenwei Zhang, Ping Luo, and Kai Chen. Multimodal-gpt: A vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790, 2023. 2",
+ "[18] Jiaming Han, Kaixiong Gong, Yiyuan Zhang, Jiaqi Wang, Kaipeng Zhang, Dahua Lin, Yu Qiao, Peng Gao, and Xiangyu Yue. Onellm: One framework to align all modalities with language. arXiv preprint arXiv:2312.03700, 2023. 1",
+ "[19] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 1",
+ "[20] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2"
+ ],
+ "bbox": [
+ 516,
+ 92,
+ 906,
+ 898
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "page_number",
+ "text": "10",
+ "bbox": [
+ 490,
+ 924,
+ 508,
+ 936
+ ],
+ "page_idx": 9
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[21] Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaoqiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. arXiv preprint arXiv:2309.04669, 2023. 2",
+ "[22] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European conference on computer vision, pages 235-251. Springer, 2016. 
6",
+ "[23] Geewook Kim, Teakgyu Hong, Moonbin Yim, JeongYeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, and Seunghyun Park. Ocr-free document understanding transformer. In Computer Vision – ECCV 2022, pages 498–517, Cham, 2022. Springer Nature Switzerland. 2",
+ "[24] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 4",
+ "[25] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-next: Stronger llms supercharge multimodal capabilities in the wild, 2024. 7",
+ "[26] Bo Li, Peiyuan Zhang, Kaichen Zhang, Fanyi Pu, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimodal models, 2024. 7, 4, 5",
+ "[27] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 2, 4, 5",
+ "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 2",
+ "[29] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2",
+ "[30] Zhihang Lin, Mingbao Lin, Luxi Lin, and Rongrong Ji. Boosting multimodal large language models with visual tokens withdrawal for rapid inference. arXiv preprint arXiv:2405.05803, 2024. 2, 7",
+ "[31] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 1, 2",
+ "[32] Yuan Liu, Haodong Duan, Yuanhan Zhang, Songyang Zhang, Bo Li, and Wangbo Zhao. 
Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281, 2023. 7", + "[33] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022. 7", + "[34] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In In" + ], + "bbox": [ + 91, + 90, + 480, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ternational Conference on Learning Representations (ICLR), 2024. 7", + "[35] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024), 2024. 7", + "[36] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36:46212-46244, 2023. 7", + "[37] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 6", + "[38] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 6", + "[39] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 
6", + "[40] Piotr Padlewski, Max Bain, Matthew Henderson, Zhongkai Zhu, Nishant Relan, Hai Pham, Donovan Ong, Kaloyan Aleksiev, Aitor Ormazabal, Samuel Phua, Ethan Yeo, Eugenie Lamprecht, Qi Liu, Yuqi Wang, Eric Chen, Deyu Fu, Lei Li, Che Zheng, Cyprien de Masson d'Autume, Dani Yogatama, Mikel Artetxe, and Yi Tay. Vibe-eval: A hard evaluation suite for measuring progress of multimodal language models. arXiv preprint arXiv:2405.02287, 2024. 7", + "[41] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024. 7", + "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2", + "[43] Erich Schubert. A Triangle Inequality for Cosine Similarity, page 32-44. Springer International Publishing, 2021. 1", + "[44] Yuzhang Shang, Mu Cai, Bingxin Xu, Yong Jae Lee, and Yan Yan. Llava-prumerge: Adaptive token reduction for efficient large multimodal models. arXiv preprint arXiv:2403.15388, 2024. 2, 7", + "[45] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 6", + "[46] Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multimodal llms. arXiv preprint arXiv:2409.10994, 2024. 
2" + ], + "bbox": [ + 516, + 92, + 906, + 898 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 4", + "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 7", + "[49] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 2", + "[50] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2", + "[51] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023. 7", + "[52] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, pages 9127–9134, 2019. 7", + "[53] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Ren-liang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. 
Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024. 7", + "[54] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training, 2023. 2", + "[55] Duzhen Zhang, Yahan Yu, Chenxing Li, Jiahua Dong, Dan Su, Chenhui Chu, and Dong Yu. Mm-llms: Recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601, 2024. 1", + "[56] Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023. 1", + "[57] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, and Hongsheng Li. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems?, 2024. 7", + "[58] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 7" + ], + "bbox": [ + 91, + 92, + 480, + 878 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 506, + 935 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models Supplementary Materials", + "text_level": 1, + "bbox": [ + 119, + 99, + 883, + 135 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. On the density peaks clustering algorithm", + "text_level": 1, + "bbox": [ + 89, + 154, + 470, + 171 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Density Peak Clustering (DPC) is a clustering algorithm that identifies cluster centers based on local density and the distance to points with higher density, denoted as $\\delta_{i}$ . 
The density, $\\rho_{i}$ , can be measured by counting the number of points within a cutoff distance $d_{c}$ from $\\mathbf{u}_{i}$ , or by using a Gaussian function where nearby points contribute more to the density, $\\rho_{i} = \\sum_{j}\\exp \\left(-\\left(\\frac{d_{ij}}{d_c}\\right)^2\\right)$ . Points with high $\\rho_{i}$ and $\\delta_{i}$ values are selected as cluster centers. This selection can be done by defining a threshold $t$ and designating points as cluster centers where $\\rho_{i}\\cdot \\delta_{i}\\geq t\\times \\max (\\rho_{i}\\cdot \\delta_{i})$ , or by selecting a fixed percentage. Other points are then assigned to the cluster of the nearest higher-density point, iterating from the highest to the lowest density. This process can create clusters of varying shapes, where the maximum distance between elements within a cluster can be extremely large. In extreme cases, the two farthest points in the input data can end up in the same cluster.", + "bbox": [ + 89, + 180, + 483, + 450 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B. DBDPC Characteristics", + "text_level": 1, + "bbox": [ + 89, + 465, + 318, + 482 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This section aims to prove that DBDPC guarantees that: Each element's distance to its assigned cluster center is at most $d_{c}$ and that all cluster centers are at least $d_{c}$ apart.", + "bbox": [ + 89, + 492, + 483, + 537 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Assume, for contradiction, that at least one of the following statements is false:", + "bbox": [ + 89, + 537, + 482, + 566 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. There exists an element $i$ assigned to a cluster such that its distance to the cluster center is greater than $d_{c}$ , i.e., $d_{is} > d_{c}$ .", + "2. 
There exist two cluster centers $s_1, s_2$ such that their pairwise distance is at most $d_c$ , i.e., $d_{s_1s_2} \\leq d_c$ ." + ], + "bbox": [ + 89, + 568, + 482, + 646 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Contradiction for Assumption 1 In DBDPC, each element $i$ is assigned to its closest cluster center:", + "bbox": [ + 89, + 665, + 482, + 695 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\ns_{i} = \\arg \\min_{s\\in C_{\\text{centers}}}d_{is}.\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 709, + 357, + 732 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "If $d_{is} > d_c$ for a given center $s$ , then we have $d_{is'} > d_c$ for all centers. However, in the DBDPC selection process, an element is assigned as a cluster center if its minimum distance to already selected centers is over $d_c$ . Thus, $i$ should have been selected as a new cluster center, and its distance to the closest cluster center would be zero, which leads to a contradiction, proving that every element satisfies $d_{is} \\leq d_c$ .", + "bbox": [ + 89, + 743, + 483, + 849 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Contradiction for Assumption 2 Assume, without loss of generality, that $s_2$ is chosen after $s_1$ . By the center selec", + "bbox": [ + 89, + 869, + 483, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "tion criterion, a new center $s_2$ is added only if:", + "bbox": [ + 511, + 156, + 821, + 171 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\min_{s\\in C_{\\text{centers}}}d_{s_{2}s} > d_{c}.\n$$\n", + "text_format": "latex", + "bbox": [ + 645, + 183, + 769, + 205 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "If $d_{s_1 s_2} \\leq d_c$ , then $s_2$ shouldn't be selected as a cluster center, which leads to a contradiction. 
Thus, no two centers can be closer than $d_c$.",
+ "bbox": [
+ 511,
+ 215,
+ 903,
+ 260
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Intra-cluster distance upper-bound : Here we will refer to cosine similarity by sim. Let $x$ and $y$ be two points in the same cluster, and $s$ their cluster center. Since each point $\\mathbf{x}$ is within $d_c$ of its cluster center $\\mathbf{s}$ and the distance used in the DBDPC algorithm is $1 - \\mathrm{sim}$ , we have $\\mathrm{sim}(\\mathbf{x},\\mathbf{s})\\geq 1 - d_c$ . We have from [43]:",
+ "bbox": [
+ 511,
+ 261,
+ 905,
+ 351
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq \\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) \\cdot \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) + m - 1,\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 552,
+ 362,
+ 864,
+ 378
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\text {w h e r e} m = \\min \\left\\{\\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) ^ {2}, \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) ^ {2} \\right\\}.\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 565,
+ 388,
+ 849,
+ 414
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Using $\\mathrm{sim}(\\mathbf{x},\\mathbf{s}),\\mathrm{sim}(\\mathbf{s},\\mathbf{y})\\geq 1 - d_c$ we get",
+ "bbox": [
+ 511,
+ 420,
+ 805,
+ 435
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq (1 - d _ {c}) ^ {2} + (1 - d _ {c}) ^ {2} - 1 = 1 - 2 d _ {c} (2 - d _ {c}).\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 511,
+ 445,
+ 910,
+ 462
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "text",
+ "text": "Finally, converting this back to the distance $d(\\mathbf{x}, \\mathbf{y}) = 1 - \\mathrm{sim}(\\mathbf{x}, \\mathbf{y})$ , we obtain:",
+ "bbox": [
+ 511,
+ 473,
+ 905,
+ 503
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "equation",
+ 
"text": "\n$$\nd (\\mathbf {x}, \\mathbf {y}) \\leq 2 d _ {c} (2 - d _ {c}).\n$$\n", + "text_format": "latex", + "bbox": [ + 620, + 513, + 795, + 531 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Therefore, the intra-cluster distance in the DBDPC algorithm is bounded by $2d_{c}(2 - d_{c})$ .", + "bbox": [ + 511, + 542, + 905, + 571 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C. A comparison between DBDPC and other clustering algorithms", + "text_level": 1, + "bbox": [ + 511, + 585, + 905, + 619 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison between DBDPC and DPC: We note that, aside from using densities, DBDPC is fundamentally different from DPC. Please refer to Appendix A for a detailed explanation of the DPC algorithm. The center identification process in DBDPC results in two main characteristics with formal proof detailed in Appendix B. First, the distance between each element and its cluster center is below $d_{c}$ , which leads to inter-cluster distances being upper-bounded by $2d_{c} \\times (2 - d_{c})$ . Additionally, the distance between cluster centers is lower-bounded by $d_{c}$ . These guarantees do not hold for DPC, leading to two drawbacks. Since intercluster distances are not controlled, merging these vectors may result in merging highly dissimilar vectors, leading to information loss. 
Also, in high-density regions, the distance between cluster centers becomes too small, making DPC ineffective in addressing information redundancy.", + "bbox": [ + 511, + 628, + 906, + 869 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Qualitative comparison Figure 12 presents the clustering results for DBDPC, DPC, DBSCAN, and K-Means on a", + "bbox": [ + 511, + 869, + 905, + 900 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 12 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 4 Recursive Center Identification for DBDPC with Iterative Center Identification" + ], + "code_body": "Input: Cutoff distance $d_{c}\\in \\mathbb{R}^{+}$ , set of vectors $\\mathbf{U} = \\{\\mathbf{u}_i\\in$ $\\mathbb{R}^{d_l}\\}_{i = 1}^n$ , density values $\\{\\rho_i\\}_{i = 1}^n$ , distance matrix $D =$ $[d_{ij}]$ , fallback threshold $T > 0$", + "guess_lang": "txt", + "bbox": [ + 91, + 123, + 483, + 170 + ], + "page_idx": 13 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Output: Cluster center indices $C_{\\text{centers}}$ Initialize cluster center set $C_{\\text{centers}} =$", + "guess_lang": "txt", + "bbox": [ + 91, + 170, + 352, + 199 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Set the density of each point :", + "bbox": [ + 107, + 200, + 305, + 215 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {i} = \\mathrm {a r g s o r t} \\big (\\{- \\rho_ {j} \\} _ {j = 1} ^ {n} \\big) [ i ]\n$$\n", + "text_format": "latex", + "bbox": [ + 202, + 226, + 385, + 246 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "while $\\mathbf{U}\\neq \\emptyset$ do", + "bbox": [ + 107, + 253, + 220, + 268 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Compute $\\delta_{i}$ for all vectors $\\mathbf{u}_i\\in \\mathbf{U}$", + "bbox": [ + 130, + 268, + 364, + 285 + ], + 
"page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {i} = \\min _ {\\rho_ {j} > \\rho_ {i}} d _ {i j}\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 297, + 341, + 321 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Select cluster candidates:", + "bbox": [ + 130, + 330, + 300, + 345 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {C} _ {\\text {n e w}} = \\left\\{\\mathbf {u} _ {i} \\in \\mathbf {U} \\mid \\delta_ {i} > d _ {c} \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 358, + 388, + 375 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\mathbf{C}_{\\mathrm{new}}$", + "bbox": [ + 130, + 386, + 302, + 401 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Update remaining vectors:", + "bbox": [ + 132, + 402, + 308, + 417 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {U} \\leftarrow \\mathbf {U} \\backslash \\left(\\mathbf {C} _ {\\text {n e w}} \\cup \\left\\{\\mathbf {u} _ {k} \\in \\mathbf {U} \\mid \\begin{array}{c} \\exists \\mathbf {u} _ {i} \\in \\mathbf {C} _ {\\text {n e w}} \\\\ \\text {s u c h t h a t} d _ {i k} \\leq d _ {c} \\end{array} \\right\\}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 106, + 439, + 478, + 474 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "if $|\\mathbf{C}_{\\mathrm{new}}| < T$ then", + "bbox": [ + 132, + 483, + 261, + 500 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Order remaining vectors $\\mathbf{U}$ by decreasing $\\rho_{i}$ :", + "bbox": [ + 156, + 500, + 455, + 513 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$\\mathbf{U}\\gets \\mathrm{Sort}(\\mathbf{U},\\mathrm{key} = \\rho_{i},\\mathrm{order} = \\mathrm{descending})$", + "bbox": [ + 156, + 515, + 455, + 530 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Call Iterative Center Identification:", + "bbox": [ + 
156, + 530, + 388, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$C_{\\mathrm{centers}} \\gets$ IterativeCenterIdentification( $C_{\\mathrm{centers}}$ , $\\mathbf{U}$ , $d_c$ )", + "bbox": [ + 156, + 544, + 513, + 560 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "return $C_{\\mathrm{centers}}$", + "bbox": [ + 156, + 561, + 253, + 574 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "end if", + "bbox": [ + 132, + 575, + 176, + 588 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "end while", + "bbox": [ + 107, + 590, + 178, + 603 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "return $C_{\\mathrm{centers}}$", + "bbox": [ + 107, + 604, + 205, + 619 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Function: Iterative Center Identification", + "text_level": 1, + "bbox": [ + 107, + 633, + 393, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Inputs: Remaining vectors $\\mathbf{U}$ (ordered by $\\rho_{i}$ ), current cluster center set $C_{\\mathrm{centers}}$ , cutoff distance $d_{c}$", + "bbox": [ + 107, + 648, + 482, + 678 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Outputs: Updated cluster center indices $C_{\\text{centers}}$", + "bbox": [ + 107, + 679, + 423, + 694 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "for all $\\mathbf{u}_i\\in \\mathbf{U}$ do", + "bbox": [ + 107, + 694, + 228, + 707 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "if $\\min_{\\mathbf{u}_s\\in C_{\\mathrm{centers}}}d_{is} > d_c$ then", + "bbox": [ + 133, + 708, + 328, + 724 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\{\\mathbf{u}_i\\}$", + "bbox": [ + 156, + 724, + 323, + 739 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "end if", + "bbox": [ + 133, + 739, + 176, + 751 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "end for", + "bbox": [ + 107, + 753, + 161, + 
766 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "return $C_{\\mathrm{centers}}$", + "bbox": [ + 107, + 768, + 204, + 782 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "predefined set of two-dimensional points. The figure shows that only DBDPC and DBSCAN identify isolated points as distinct clusters, a crucial feature for visual token reduction, as these points contain unique and thus potentially valuable information. We note that, for DBSCAN, these isolated", + "bbox": [ + 89, + 824, + 483, + 898 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 90, + 715, + 190 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg", + "image_caption": [ + "Figure 12. An illustrative example of the difference in clustering characteristics between DBDPC and other clustering algorithms. Two-dimensional points and the Euclidean distance were used for illustration purposes." + ], + "image_footnote": [], + "bbox": [ + 517, + 196, + 715, + 291 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 715, + 90, + 911, + 190 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 715, + 196, + 911, + 291 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "points may be identified as noise, depending on the chosen hyperparameters. Moreover, DBDPC partitions both the left and right groups of points into the same number of clusters, maintaining consistency despite the higher density on the left side. 
In contrast, DPC tends to form a greater number of clusters in high-density regions while creating large clusters in low-density areas, whereas DBSCAN follows the opposite pattern, producing large clusters in high-density regions. In the context of visual token reduction, merging points within these large clusters can result in information loss, leading to performance degradation and making DPC and DBSCAN less suitable than DBDPC for this task. We note that the results presented in Fig. 12 for DPC and DBSCAN may change when modifying the hyperparameters; however, the characteristics discussed above persist across different hyperparameter choices.", + "bbox": [ + 511, + 385, + 906, + 627 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D. Efficient center identification in DBDPC", + "text_level": 1, + "bbox": [ + 511, + 641, + 879, + 657 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "D.1. A recursive approach", + "text_level": 1, + "bbox": [ + 511, + 666, + 718, + 683 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To enhance the efficiency of the DBDPC algorithm, we introduce a recursive center identification method that reduces computational overhead while maintaining clustering accuracy. In the DBDPC algorithm, vectors are processed in descending order of their local densities $\\rho_{i}$ , and a vector $\\mathbf{u}_i$ is selected as a cluster center if it is farther than the cutoff distance $d_c$ from all previously selected centers. Implementing this as described in the algorithm requires sequentially iterating through all the vectors and checking distances to all previously selected centers, which does not fully leverage GPU parallelization capabilities. In the DBDPC algorithm, when two points have the same density, one is treated as if it has a higher density than the other, depending on the order of their processing. 
To replicate this behavior, we assign the", + "bbox": [ + 511, + 688, + 906, + 900 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "density of each point to its rank as:", + "bbox": [ + 89, + 90, + 323, + 104 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {i} = \\operatorname {r a n k} _ {i} = \\operatorname {a r g s o r t} \\left(\\left\\{- \\rho_ {j} \\right\\} _ {j = 1} ^ {n}\\right) [ i ]\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 117, + 406, + 135 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Our accelerated method leverages the quantity $\\delta_{i}$ , representing the minimum distance from vector $\\mathbf{u}_i$ to any higher-density vector:", + "bbox": [ + 89, + 146, + 482, + 191 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {i} = \\min _ {\\rho_ {j} > \\rho_ {i}} d _ {i j} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 205, + 482, + 229 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "If $\\delta_{i} > d_{c}$ , then $\\mathbf{u}_{i}$ is selected as a cluster center because it is not within $d_{c}$ of any higher-density vector, which are the only potential cluster centers that can be selected before $d_{ij}$ in the DBDPC algorithm. In addition, any vector within $d_{c}$ of a cluster center identified using $\\delta_{i}$ has a lower density than that center, as cluster centers identified using $\\delta_{i}$ are not within $d_{c}$ of any higher-density vector. In the DBDPC algorithm, such a vector would not be chosen as a cluster center because it violates the distance condition relative to already selected centers. By identifying these vectors early, we can exclude them from further consideration as potential centers. 
We repeat this process recursively: after selecting cluster centers where $\\delta_{i} > d_{c}$ and excluding vectors within $d_{c}$ of these centers, we process the remaining vectors. This recursion continues until the number of newly discovered cluster centers becomes small (e.g., less than 10). At that point, we fall back to the DBDPC method, processing the remaining vectors iteratively to ensure all potential centers are considered. This recursive approach reduces the number of iterations in the main loop and enhances parallelization, particularly on GPUs, by minimizing sequential computation. By leveraging $\\delta_{i}$ and incorporating an early exclusion mechanism, the recursive center identification method reduces computational time while ensuring the same clustering results as the DBDPC algorithm. The recursive approach decreases the number of iterations and enhances GPU parallelization by minimizing sequential computation, making the algorithm more efficient for large datasets. The recursive center identification method is presented in Algorithm 4. We note that in practice this recursive approach reduces the computational time of the DBDPC algorithm by around 3 times.", + "bbox": [ + 91, + 234, + 483, + 718 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D.2. Proof of correctness of the recursive approach", + "text_level": 1, + "bbox": [ + 89, + 727, + 482, + 744 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To validate the correctness of the accelerated method, we demonstrate the following key points: selected centers are valid cluster centers, excluded vectors are not cluster centers and identifying remaining cluster centers is equivalent to identifying cluster centers on the reduced set. 
Proving these points suffices to establish correctness, as the remaining vectors after the recursive steps are treated the same as in the DBDPC algorithm.", + "bbox": [ + 89, + 750, + 482, + 869 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Selected Centers Are Valid Cluster Centers In the DB-DPC algorithm, for any vector $\\mathbf{u}_i$ , only vectors with higher", + "bbox": [ + 89, + 869, + 482, + 901 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "densities are considered for selection as cluster centers before $\\mathbf{u}_i$ . If $\\mathbf{u}_i$ is not within $d_c$ of any higher-density vector (i.e., $\\delta_i > d_c$ ) then the distance of $\\mathbf{u}_i$ from any previously selected center cannot exceed the cutoff distance $d_c$ . Consequently, $\\mathbf{u}_i$ satisfies the condition for being a cluster center in the DBDPC algorithm, as it is farther than $d_c$ from all centers processed earlier.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Excluded Vectors Are Not Cluster Centers Vectors within $d_{c}$ of a cluster center identified using $\\delta_{i}$ have lower densities than that center, as these centers are not within $d_{c}$ to any higher density point. In the DBDPC algorithm, such vectors would not be selected as cluster centers because they are within $d_{c}$ to an already selected center, violating the distance condition. Therefore, excluding these vectors early does not affect the selection of valid cluster centers.", + "bbox": [ + 511, + 196, + 903, + 316 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Identifying Remaining Cluster Centers is Equivalent to Identifying Cluster Centers on the Reduced Set After selecting cluster centers where $\\delta_{i} > d_{c}$ and excluding vectors within $d_{c}$ of these centers, we focus on the reduced set of remaining vectors for further processing. 
The critical observation is that the previously selected cluster centers are not within $d_{c}$ of any vector in the reduced set. This is ensured by the exclusion step, where all vectors within $d_{c}$ of these centers have been removed. Consequently, when identifying new cluster centers within the reduced set, we do not need to consider distances to the previously selected centers, as they cannot influence the selection due to their distance. Moreover, the vectors that have been excluded are not potential cluster centers themselves. Meaning that they can not influence the center selection process. This means that any vector satisfying $\\delta > d_{c}$ in the reduced set, is actually not within $d_{c}$ to any higher density potential cluster center form the initial set, making it a cluster center.", + "bbox": [ + 511, + 316, + 903, + 590 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E. On the choice of Positional IDs for clustering algorithms", + "text_level": 1, + "bbox": [ + 511, + 599, + 903, + 635 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In our work, we benchmark four clustering algorithms: agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14]. For each algorithm, we use the key vectors for clustering, apply a cosine similarity-based distance (as in DBDPC), and evaluate two strategies: merging the hidden states within each cluster or selecting the cluster center as a representative token. We report the best-performing approach for each algorithm. Similar to DBDPC, we assign the position ID of the cluster center to the resulting vectors. However, apart from DPC, the other clustering algorithms do not explicitly provide a cluster center. For k-means and agglomerative clustering, we select the cluster center as the point closest to the average of all points in the cluster, using keys and cosine similarity. 
For DBSCAN, we experimented with choosing the point connected to the most other points within the cluster and found this approach to yield slightly better results, aligning", + "bbox": [ + 511, + 643, + 906, + 902 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "better with the principles of DBSCAN. Thus, we adopted this strategy in our tests.", + "bbox": [ + 89, + 90, + 480, + 122 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "F. More about applying ToME to Visual Language Models", + "text_level": 1, + "bbox": [ + 89, + 132, + 482, + 167 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "ToMe reduces the number of visual tokens at each layer of the transformer. For a given layer $i$ , the process starts by splitting the tokens into two distinct sets, A and B. Each token in set A is matched with its most similar counterpart in set B, using cosine similarity based on key vectors to determine the closest pairs. The top $r_i$ pairs with the highest similarity are then selected for merging. Connected components from the matched pairs are combined into single vectors, where hidden states are averaged. It is important to note that each connected component contains exactly one element from set B, and when applying ToME to Visual Language Models, this element's position ID is assigned to the merged token. In [6], the number of visual tokens was reduced by a fixed quantity $(r_i = r)$ . However, this fixed reduction scheme cannot achieve more than a $50\\%$ reduction unless no reduction is done at later layers when the number of tokens drops below $r$ , which goes against the gradual reduction strategy proposed in ToMe. To enable higher reduction ratios, we adopt a linearly decreasing scheduler, where the reduction is higher in early layers and decreases in later layers. 
This approach achieves a smaller average number of visual tokens across the network while still reducing the token count at each layer, allowing us to reach high reduction ratios effectively.", + "bbox": [ + 89, + 176, + 482, + 539 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "G. Implementation details and hyperparameters for PACT", + "text_level": 1, + "bbox": [ + 89, + 554, + 482, + 589 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For all experiments on LLaVA-OneVision-7B, we set $d_{n} = 2$ , $\\alpha = 1.5$ , and $L = 4$ . While the optimal values of each parameter may vary depending on the dataset, we aim to evaluate the real-world effectiveness of our approach by using consistent values across all testing datasets. The results in Tab. 2 were obtained using $d_{c} = 0.21$ and $\\lambda = 0.55$ while those in Tab. 1 were obtained using $d_{c} = 0.17$ and $\\alpha = 0.7$ . Additionally, to demonstrate the performance of our approach at different reduction ratios, we vary $d_{c}$ and $\\lambda$ and report the results. The values of the fixed parameters $d_{n}$ and $\\alpha$ were chosen by performing a grid search on SeedBench [24], which is why we do not include Seed-Bench in the testing datasets. It is important to note that finding the optimal parameters for all testing datasets is not the focus of this study, as this would require extensive testing of different values for $d_{c}$ , $\\lambda$ , $L$ , $\\alpha$ , and $d_{n}$ on all test sets. Such an approach would not accurately reflect the real-world performance of our method. Instead, we chose to only vary $d_{c}$ and $\\lambda$ to evaluate the effectiveness of our approach at different reduction ratios. When", + "bbox": [ + 89, + 598, + 482, + 900 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "testing on SeedBench, we found that a pruning ratio higher than $60\\%$ harms performance. 
Therefore, we vary the pruning ratio between $10\\%$ and $60\\%$ and test across different values of $d_{c}$ . When testing PACT on LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. We use the same values of $d_{n}$ and $\\alpha$ as when testing on LLaVA-OneVision-7B. We note that these hyperparameters may not be optimal; however, as we aim to test the generalizability of our approach, we opt to use the same hyperparameters across models. Figure 13, Figure 14 and Figure 15 show the maximum distance between the keys at several layers of the language model for LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. Following the same approach for LLaVA-OneVision-7B, we choose $L = 4$ for Qwen2-VL-7B-Instruct and $L = 7$ for InternVL2-8B. We note that the choice of the reduction layer for InternVL2-8B is not as evident as for LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct, as the increase in maximum distance from one layer to the next is sometimes minimal, making it unclear which layer offers the best balance between accuracy and computational efficiency. However, since we do not aim to experimentally determine the optimal reduction layer, we end up choosing $L = 7$ , as the maximum distance between keys is increased by an acceptable amount between the seventh and eighth layer. Following the same approach we use $L = 7$ for LLaVA-1.6-Mistral-7B.", + "bbox": [ + 511, + 90, + 906, + 484 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "H. More about test datasets and used metrics", + "text_level": 1, + "bbox": [ + 511, + 497, + 895, + 513 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For evaluating the different approaches, we use LMMs-Eval [26] and aim to follow the same dataset splits and metrics as used in [27]. We detail the used splits and metrics in Tab. 4. Some datasets require evaluation using a GPT model through the OPENAI API or other closed-source models. 
However, for many datasets the version of the closed-source model used in evaluating LLaVA-OneVision in [27] is no longer available. So we use the latest version of GPT-4 for our assessments at the time of publication (gpt-4o-2024-08-06). We also observed that when calling a closed-source model like GPT-4 via an API, the responses are not fully deterministic, even with a temperature set to zero, introducing some noise into the evaluation metrics. To reduce this noise, we exclude all these datasets when testing across different reduction ratios. On the other hand, for Tab. 1, we exclude MMVet, Vibe-Eval, VideoChatGPT, MM-LiveBench, and LLaVA-Wilder as they have high inference times, which would dominate the throughput calculation.", + "bbox": [ + 511, + 523, + 906, + 792 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For certain datasets, such as DocVQA, InfoVQA, and TextVQA, we use the validation split contrary to [27]. This choice allows us to test various reduction ratios and approaches without requiring submission to the test server, which would be impractical for extensive testing. For datasets requiring a test set submission (EgoSchema and PerceptionTest), where either the validation set is typically", + "bbox": [ + 511, + 795, + 908, + 902 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg", + "image_caption": [ + "Figure 13. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-1.6-Mistral-7B before the application of rotary embeddings." + ], + "image_footnote": [], + "bbox": [ + 107, + 89, + 467, + 270 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "not used for evaluation or does not exist, we report the submission-based metrics evaluated directly on the test set. 
As explained above, for some datasets our evaluation setup differs from the one used for evaluating LLaVA-OneVision in [27], which may result in variations in the reported results for this model on certain datasets. This is primarily due to the use of validation splits for DocVQA, InfoVQA, and TextVQA, as well as the reliance on GPT-based metrics for some datasets (a common practice for these benchmarks, making alternative evaluation difficult). Nevertheless, our comparisons remain fair, as the same evaluation procedure is consistently applied across all approaches and reduction ratios.", + "bbox": [ + 91, + 348, + 480, + 542 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We note that when using reduction methods, results may include slight variations due to edge cases where distances or importance metrics for different vectors are equal. That's why we report results based on the average of three different runs for each dataset.", + "bbox": [ + 93, + 545, + 480, + 619 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Notably, when testing on Qwen2-VL-7B-Instruct without reduction, some datasets encountered GPU out-of-memory errors (MLVU, VideoMME, and ActivityNet Perception) which we excluded from the test set. Additionally, results on ScienceQA were quite low when tested without reduction (0.132), leading to its exclusion from testing as well. We note that, as we use LMM-Eval [26] for evaluation, results differ for some datasets from the officially reported results, as prompts are sometimes not formatted in the same manner. This observation also applies to InternVL2-8B.", + "bbox": [ + 93, + 619, + 480, + 784 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "I. Additional numerical results", + "text_level": 1, + "bbox": [ + 93, + 800, + 349, + 815 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 8 and Tab. 
9 show a comparison of DBDPC and various clustering algorithms for a reduction ratio of approximately $60\\%$ on LLaVA-OneVision-7B across multiple datasets. The results demonstrate that DBDPC outperforms other clustering algorithms in visual token reduction for the", + "bbox": [ + 93, + 825, + 480, + 898 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg", + "image_caption": [ + "Figure 14. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of Qwen2-VL-7B-Instruct before the application of rotary embeddings." + ], + "image_footnote": [], + "bbox": [ + 531, + 90, + 885, + 294 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "majority of the datasets. Additionally, the tables show that the clustering process for DBDPC is significantly faster than that of other clustering algorithms. Table 10 presents a comparison of EUTI-based visual token pruning and FastV for a reduction ratio of approximately $60\\%$ on LLaVA-OneVision-7B across various datasets. The results indicate that EUTI outperforms FastV on most datasets while also being more computationally efficient. Table 15 shows that using keys for distance calculations in DBDPC outperforms hidden states across the majority of the test datasets. Also, we present a comparison between PACT and other visual reduction techniques for InternVL2-8B, and LLaVA-1.6-Mistral-7B across different datasets in Tab. 6, and Tab. 7.", + "bbox": [ + 516, + 400, + 903, + 595 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg", + "image_caption": [ + "Figure 15. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of InternVL2-8B before the application of rotary embeddings." 
+ ], + "image_footnote": [], + "bbox": [ + 532, + 646, + 885, + 842 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "J. Ablation study : Additional numerical results", + "text_level": 1, + "bbox": [ + 94, + 89, + 480, + 121 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 11 shows a comparison between PACT, DBDPC, and EUTI for a reduction ratio of approximately $70\\%$ , applied on LLaVA-OneVision-7B. The results demonstrate that PACT, which combines both clustering and pruning, outperforms the other two methods that are either clustering-based or pruning-based across various datasets. More importantly, DBDPC and EUTI exhibit a significant drop in performance on some of the datasets, which is not the case for PACT. We note that numerical results for the ablation studies conducted on DBDPC, EUTI, and PACT can be found in Tab. 12, Tab. 13 and Tab. 14.", + "bbox": [ + 94, + 132, + 480, + 297 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 926, + 503, + 935 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/4fddf2bbe561b78a54a7f7ffc2b2961bf3a801236cc2b45eb20735bc0616777f.jpg", + "table_caption": [ + "Table 4. Dataset Splits, Subsets, and Evaluation Metrics Used in Our Experiments. Default indicates the use of the standard test split or cases where only one split/subset is available. The evaluation metrics employed are those commonly used for the respective datasets and generally the ones proposed in the official papers. For GPT-based scores (or any model-based scores), this means that a GPT model was used during evaluation, typically to extract answers from the generated output text, which are then matched with the ground truth to calculate accuracy using exact matches. 
When accuracy is reported, it generally implies that only an exact match is considered a correct answer." + ], + "table_footnote": [], + "table_body": "
DatasetSplitSubsetEvaluation Metric
VideoMMEDefaultNo subtitlesAccuracy
MMEDefaultDefaultMME Perception Score
DocVQAValidationDefaultANLS
MLVUDefaultDefaultAccuracy
LLaVA-InterleaveDefaultOut-domainAccuracy
ChartQAValidationDefaultRelaxed Accuracy
MMBenchValidationEnglishGPT-based Score
MuirBenchDefaultDefaultAccuracy
ScienceQADefaultVision onlyAccuracy
MMMUValidationDefaultAccuracy
AI2DDefaultDefaultAccuracy
InfographicVQAValidationDefaultANLS
MMStarDefaultDefaultAccuracy
ActivityNetQADefaultDefaultGPT-based Score
MM-LiveBenchDefault2406GPT-based Score
LLaVA-WilderDefaultSmallGPT-based Score
MathVerseDefaultVision miniGPT-based Score
MathVistaDefaultTestminiGPT-based Score
MMVetDefaultDefaultGPT-based Score
Vibe-EvalDefaultDefaultREKA-based Score
VideoChatGPTDefaultDefaultGPT-based Score
EgoSchemaDefaultDefaultSubmission
PerceptionTestDefaultMultiple Choice QASubmission
TextVQAValidationDefaultOfficial metric
", + "bbox": [ + 225, + 343, + 767, + 739 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/b373501466d7e91d7e686c23bd0ab34b12c97e4ccb856350d0e6b824e1c308d9.jpg", + "table_caption": [ + "Table 5. Performance of PACT on LLaVA-OneVision-7B using $d_{c} = 0.17$ and $\\alpha = 0.7$ ." + ], + "table_footnote": [], + "table_body": "
DatasetPACT (Ours)
MetricRed. RatioProc. TimeAlgo. Time
VideoMME57.769.2%0.3210.021
MME1571.072.1%0.2260.017
DocVQA85.471.1%0.4670.026
MLVU64.869.2%0.3220.022
LLaVA-Interleave62.272.2%0.1330.010
ChartQA77.371.4%0.3090.019
MMBench79.972.0%0.1340.010
MuirBench42.470.9%0.1750.013
ScienceQA93.572.0%0.1300.010
MMMU48.872.6%0.1030.007
AI2D81.272.5%0.1730.013
InfographicVQA61.570.0%0.4030.023
MMStar59.572.3%0.1470.011
ActivityNetQA55.170.0%0.4090.029
MathVerse17.176.0%0.3500.021
MathVista62.173.0%0.2600.015
EgoSchema60.069.1%0.3200.021
PerceptionTest52.370.0%0.3010.023
TextVQA75.569.2%0.3200.023
", + "bbox": [ + 256, + 119, + 743, + 463 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/47d6de9bf39a12c1ede01a2d5400e2a30ae9211ca9514afd9801eed89d29348e.jpg", + "table_caption": [ + "Table 6. Comparison of PACT with FastV, VTW, and ToME applied on InternVL2-8B on Various Datasets." + ], + "table_footnote": [], + "table_body": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
VideoMME52.20.24751.168.4%0.15151.10.15551.00.14250.20.190
MME1621.00.1711591.969.9%0.1211588.70.1181627.00.1111533.30.155
MLVU50.60.43949.768.8%0.32648.80.32549.50.33329.30.343
LLaVA-Interleave40.00.39039.071.2%0.26539.70.26339.60.23036.70.316
MMBench81.90.16180.470.4%0.11880.20.11680.20.10970.80.165
MuirBench35.70.43234.470.3%0.24935.60.25833.70.21032.70.296
ScienceQA97.10.16597.170.8%0.11895.80.11695.70.10989.90.151
MMMU48.50.16748.070.6%0.12647.70.12647.80.11947.50.156
AI2D82.50.14681.470.7%0.11278.50.11079.60.10574.40.142
MMStar59.00.17956.770.4%0.18654.20.18453.40.35255.10.156
PerceptionTest57.70.30056.866.0%0.20356.20.21334.10.19255.20.228
EgoSchema54.00.24053.767.0%0.15553.10.16332.20.14652.90.172
ActivityNet51.70.24051.366.0%0.15351.00.16130.80.14350.40.171
MM-LiveBench68.03.07567.368.0%2.14067.02.24740.42.00366.62.354
", + "bbox": [ + 94, + 503, + 906, + 709 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/ed6b232143a7b8525b16cacf76dc3a10e834e1960654e6bfa3d7bf0f470aac2c.jpg", + "table_caption": [ + "Table 7. Comparison of PACT with FastV, Prumerge, and Hired applied on LLaVA-1.6-Mistral-7B across multiple datasets." + ], + "table_footnote": [], + "table_body": "
DatasetNo ReductionPACT (Ours)FastVPrumergeHired
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1500.00.2371507.170.3%0.1591503.90.1581485.40.1661497.00.168
DocVQA70.00.36367.167.1%0.28464.50.28148.80.29365.80.295
ChartQA52.90.33249.370.1%0.25948.90.26136.00.26446.10.266
MMBench68.20.22668.071.9%0.15567.90.15466.20.16067.60.164
ScienceQA73.00.19772.771.5%0.14473.20.14571.70.14872.90.149
MMMU34.20.23934.971.5%0.17134.70.16933.90.18033.90.180
AI2D67.50.23367.570.9%0.16067.00.15864.50.16565.90.166
InfographicVQA36.90.29435.666.2%0.22633.40.22931.90.23631.60.236
MMStar36.20.37536.771.9%0.35036.60.40035.10.34535.90.345
", + "bbox": [ + 94, + 747, + 906, + 893 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/7c034be835ae6416aad9e6ae87aa0a942f2b6dec0befc8b2ea5c6961e0d1085d.jpg", + "table_caption": [ + "Table 8. Comparison of DBDPC and Agglomerative Clustering Methods for a Reduction Ratio of approximately $60\\%$ on LLaVA-OneVision-7B." + ], + "table_footnote": [], + "table_body": "
DatasetDBDPC (ours)Agg. (Single Linkage)Agg. (Average Linkage)Agg. (Complete Linkage)
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.61.5041.14857.01.6571.31657.91.6901.350
MME1563.80.2550.0281554.10.9940.7381559.21.1230.8681563.01.1510.897
DocVQA84.70.5300.04483.61.8991.37984.42.1851.66284.32.3081.777
MLVU64.20.3840.03964.01.5741.22965.21.6751.32964.81.7001.355
LLaVA-Interleave62.10.1510.01662.00.4250.27761.50.4460.29861.40.4460.298
ChartQA76.00.3660.03174.51.1510.79875.81.2530.91075.81.2770.930
MMBench80.10.1510.01679.50.4270.27779.70.4370.29179.80.4490.299
MuirBench43.20.2150.02341.40.6670.47442.00.7270.53442.00.7380.544
ScienceQA94.70.1470.01594.80.3940.25094.70.4160.27194.70.4130.269
MMMU48.30.1100.00948.40.2180.11049.30.2320.12148.20.2250.117
AI2D80.70.2020.02280.80.6670.47280.60.7480.55180.10.7530.557
InfographicVQA61.60.5280.04657.11.6081.18159.81.8181.39459.81.8701.436
MMStar60.50.1670.01860.20.5070.34459.80.5560.39060.50.5600.395
", + "bbox": [ + 93, + 141, + 906, + 315 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/2da5772f2eeadcbe64d4a7c934552b43391ca5494d5f1c72760427bf69ecdd85.jpg", + "table_caption": [ + "Table 9. Comparison of DBDPC, DBSCAN, DPC, and KMeans Clustering Methods for a Reduction Ratio of approximately $60\\%$ on LLaVA-OneVision-7B." + ], + "table_footnote": [], + "table_body": "
DatasetDBDPC (ours)DBSCANDPCKMeans
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.40.3940.04656.90.7290.39257.31.7251.383
MME1563.80.2550.0281560.30.2740.0361549.90.6370.3801549.91.2540.999
DocVQA84.70.5300.04484.20.5330.04483.00.9500.44279.62.0591.544
MLVU64.20.3840.03964.20.3910.04864.20.7270.38264.61.7251.377
LLaVA-Interleave62.10.1510.01660.40.1590.02663.90.2580.12162.30.7110.566
ChartQA76.00.3660.03175.20.3690.03475.20.7580.41574.21.3991.059
MMBench80.10.1510.01678.10.1530.02079.50.3260.17979.90.7020.552
MuirBench43.20.2150.02342.40.2190.02842.00.4660.27342.90.9550.763
ScienceQA94.70.1470.01591.20.1500.02494.30.2510.11793.40.6610.518
MMMU48.30.1100.00947.80.1300.03048.30.1870.07848.20.5000.391
AI2D80.70.2020.02279.20.2020.02280.30.4550.26481.11.0620.860
InfographicVQA61.60.5280.04654.00.5310.05256.60.9750.54757.81.7801.357
MMStar60.50.1670.01856.60.1790.02860.60.3760.21360.20.8280.661
", + "bbox": [ + 94, + 388, + 906, + 561 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/53c029aac0c8feed6528662a19f0a98c00dc5cad77900b4645d2e4b3090fe95c.jpg", + "table_caption": [ + "Table 10. Comparison of EUTI-based visual tokens pruning and FastV for a Reduction Ratio of approximately $60\\%$ on LLaVA-OneVision-7B." + ], + "table_footnote": [], + "table_body": "
DatasetEUTI (Ours)FastV
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME58.40.3510.00557.60.3810.040
MME1560.00.2560.0041570.70.2830.025
DocVQA86.50.5210.00585.30.5590.032
MLVU64.30.3550.00463.10.3910.040
LLaVA-Interleave58.90.1400.00359.70.1520.007
ChartQA78.60.3440.00478.00.3630.016
MMBench80.20.1420.00379.20.1510.005
MuirBench40.00.1910.00340.80.2040.009
ScienceQA93.60.1370.00392.30.1490.006
MMMU48.80.1010.00247.30.1100.003
AI2D81.10.1910.00380.30.2020.009
InfographicVQA63.00.4250.00560.30.4730.040
MMStar59.60.1590.00359.60.1700.007
", + "bbox": [ + 169, + 636, + 823, + 885 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 925, + 504, + 936 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/c869c2029d490794dbb3bde4a68093b4736da9447da98d5fdbfcfc98533dc48b.jpg", + "table_caption": [ + "Table 11. Comparison of PACT with Standalone Methods: EUTI-based Visual Token Pruning and DBDPC Clustering Algorithm for a Reduction Ratio of approximately $70\\%$ , applied on LLaVA-OneVision-7B." + ], + "table_footnote": [], + "table_body": "
DatasetPACTDBDPCEUTI
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.50.3210.02157.30.3420.04058.40.3050.005
MME1558.70.2260.0171543.70.2430.0281595.90.2130.004
DocVQA84.30.4670.02682.50.5000.04485.30.4560.005
MLVU64.60.3220.02263.90.3580.03964.40.2910.004
LLaVA-Interleave63.90.1330.01062.60.1490.01657.10.1270.003
ChartQA77.20.3110.01975.10.3330.03178.20.2920.004
MMBench80.20.1340.01079.70.1470.01679.60.1280.003
MuirBench42.80.1750.01343.20.2110.02339.90.1640.003
ScienceQA93.60.1300.01093.80.1420.01592.20.1230.003
MMMU48.90.1030.00747.20.1090.00948.90.0960.002
AI2D80.60.1730.01380.50.1910.02279.90.1640.003
InfographicVQA61.90.4030.02358.80.4650.04660.40.3600.005
MMStar59.50.1470.01159.50.1630.01859.20.1400.003
", + "bbox": [ + 94, + 188, + 906, + 412 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/1e4b5a29c5c158bc8faf221aaeebaba29da94b0f519896f6bd7fbc3df2a52aee.jpg", + "table_caption": [ + "Table 12. Ablation Studies on DBDPC-based visual token reduction for a Reduction Ratio of approximately $60\\%$ on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches. When ablating the Center Position-IDs assignment, we reorder the hidden states based on the mean of the Position-IDs of the elements in each cluster and then assign position IDs sequentially." + ], + "table_footnote": [], + "table_body": "
DBDPCw/o Center Position-IDs assignmentw/o Proportional Attentionw/o Merging
VideoMME57.458.057.957.5
MME1563.81539.31523.81476.9
DocVQA84.728.284.283.1
MLVU64.261.263.963.5
LLaVA-Interleave62.169.663.263.6
ChartQA76.024.876.074.4
MMBench80.176.180.179.6
MuirBench43.226.543.244.0
ScienceQA94.767.494.293.6
MMMU48.334.547.648.2
AI2D80.743.080.479.9
InfographicVQA61.617.859.858.7
MMStar60.558.959.659.1
", + "bbox": [ + 99, + 608, + 890, + 837 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/7c35b30d6fa0b6f7bac48158d8e6bf94a4fbad368daa8af814b0768af4faa148.jpg", + "table_caption": [ + "Table 13. Ablation Studies on the EUTI-based Visual Token Pruning for a Reduction Ratio of approximately $70\\%$ , applied on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches." + ], + "table_footnote": [], + "table_body": "
DatasetEUTIEUTI w/o NormNorm (EUTI w/o Global Query)
VideoMME58.457.656.6
MME1595.91573.41576.5
DocVQA85.385.179.7
MLVU64.363.063.1
LLaVA-Interleave57.157.952.9
ChartQA78.276.476.7
MMBench79.679.479.4
MuirBench40.040.539.6
ScienceQA92.291.893.5
MMMU48.949.349.2
AI2D79.979.979.7
InfographicVQA60.460.149.3
MMStar59.257.459.2
", + "bbox": [ + 207, + 126, + 784, + 354 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/15271f48c5f424341279adde269dbd210f51db8fd8ea993e5b4552d4548344f8.jpg", + "table_caption": [ + "Table 14. Ablation Study on Pruned Tokens Recovery for a Reduction Ratio of approximately $70\\%$ . We remove the token recovery step, which is equivalent to Setting $\\alpha$ to Zero. We report only the metrics, as processing time is similar across both approaches." + ], + "table_footnote": [], + "table_body": "
DatasetPACTPACT w/o Pruned-Token Recovery
VideoMME57.657.4
MME1556.71576.3
DocVQA84.384.3
MLVU64.664.2
LLaVA-Interleave63.959.6
ChartQA76.476.4
MMBench79.979.8
MuirBench42.842.2
ScienceQA93.393.6
MMMU48.548.5
AI2D80.680.6
InfographicVQA61.961.3
MMStar75.174.9
", + "bbox": [ + 264, + 398, + 725, + 627 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/7d8540bfc1f13e20032a59f4e2f73397b14276eca2d36cc26cb8541cdde345ac.jpg", + "table_caption": [ + "Table 15. Ablation Study on Keys Utilization in DBDPC for a Reduction Ratio of approximately $60\\%$ . Metrics are reported, as processing time is similar across both configurations." + ], + "table_footnote": [], + "table_body": "
DatasetDBDPCDBDPC w/o Keys
VideoMME57.4057.22
MME1563.801526.18
DocVQA84.7080.50
MLVU64.2064.60
LLaVA-Interleave62.1060.80
ChartQA76.0068.80
MMBench80.1079.21
MuirBench43.2041.40
ScienceQA94.7091.90
MMMU48.3047.90
AI2D80.7079.10
InfographicVQA61.656.70
MMStar60.5058.40
", + "bbox": [ + 320, + 671, + 669, + 900 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_model.json b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2bd642412d0d697aacc8a8d92cb88438089e9a18 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_model.json @@ -0,0 +1,4481 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.08966v1 [cs.CV] 11 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.135, + 0.13, + 0.868, + 0.176 + ], + "angle": 0, + "content": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.205, + 0.369, + 0.221 + ], + "angle": 0, + "content": "Mohamed Dhouib" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.222, + 0.466, + 0.258 + ], + "angle": 0, + "content": "LIX, École Polytechnique, IP Paris, France \nmohamed.dhouib@polytechnique.edu" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.267, + 0.351, + 0.282 + ], + "angle": 0, + "content": "Sonia Vanier" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.284, + 0.468, + 0.32 + ], + "angle": 0, + "content": "LIX, École Polytechnique, IP Paris, France \nsonia.vanier@polytechnique.edu" + }, + { + "type": "text", + "bbox": [ + 0.622, + 0.205, + 0.756, + 0.221 + ], + "angle": 0, + "content": "Davide Buscaldi" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.223, + 0.874, + 0.256 + ], + "angle": 0, + "content": "LIPN, Université Sorbonne Paris Nord, France davide.buscaldi@lipn.univ-paris13.fr" + }, + { + "type": "text", + "bbox": [ + 0.628, + 0.267, + 0.751, + 0.283 + ], + "angle": 0, + "content": 
"Aymen Shabou" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.285, + 0.871, + 0.301 + ], + "angle": 0, + "content": "DataLab Groupe, Crédit Agricole S.A, France" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.305, + 0.838, + 0.318 + ], + "angle": 0, + "content": "aymen.shabou@credit-agricole-sa.fr" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.355, + 0.327, + 0.371 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.387, + 0.486, + 0.645 + ], + "angle": 0, + "content": "Visual Language Models require substantial computational resources for inference due to the additional input tokens needed to represent visual information. However, these visual tokens often contain redundant and unimportant information, resulting in an unnecessarily high number of tokens. To address this, we introduce PACT, a method that reduces inference time and memory usage by pruning irrelevant tokens and merging visually redundant ones at an early layer of the language model. Our approach uses a novel importance metric to identify unimportant tokens without relying on attention scores, making it compatible with FlashAttention. We also propose a novel clustering algorithm, called Distance Bounded Density Peak Clustering, which efficiently clusters visual tokens while constraining the distances between elements within a cluster by a predefined threshold. We demonstrate the effectiveness of PACT through extensive experiments." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.678, + 0.222, + 0.695 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.704, + 0.486, + 0.902 + ], + "angle": 0, + "content": "Extending Large language models to modalities other than text [11, 18, 19, 55, 56] has seen success in recent years across various domains, especially in the visual domain with models like LLaVA [31] and Qwen-VL [4]. 
State-of-the-art Visual Language Models generally consist of three main components: a vision encoder, a connector, and a language model. The vision encoder converts input images into visual tokens, which are passed through the connector and then fed to the language model along with the input text. While this architecture has shown impressive performance across different tasks, it suffers from high computational cost due to the large number of visual tokens. In this paper, we introduce two complementary methods to" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.356, + 0.908, + 0.717 + ], + "angle": 0, + "content": "optimize Visual Language Models by reducing inference time and memory requirements: a pruning module and a clustering algorithm. These methods can be used independently or combined, forming the PACT approach for greater effectiveness. Notably, our pruning and clustering modules, as well as PACT, are applied at inference time and thus require no additional training. The pruning module identifies unimportant visual tokens based on a novel importance metric that evaluates each token's relevance without relying on attention scores. This makes it compatible with FlashAttention [12], as FlashAttention does not support the calculation of attention scores. The second module introduces a novel clustering algorithm, Distance Bounded Density Peak Clustering (DBDPC), which clusters visual tokens while ensuring that the distances between elements within a cluster are constrained by a predefined threshold. By combining these two methods, we develop PACT. First, the pruning module eliminates unimportant tokens, then the DBDPC algorithm clusters the remaining ones. Tokens that were initially pruned but are sufficiently close to the constructed clusters are reincorporated, ensuring that valuable information from the pruned tokens is recovered. Finally, the tokens within each cluster are merged into a single representative token, reducing the total token count." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.718, + 0.909, + 0.884 + ], + "angle": 0, + "content": "By combining both pruning and clustering, PACT achieves an effective visual token reduction, addressing both irrelevant and redundant tokens. When applied to LLaVA-OneVision-7B, PACT achieves a \\(50\\%\\) visual token reduction with negligible performance loss. Moreover, PACT exhibits significantly less performance degradation at higher reduction ratios compared to previous methods, achieving \\(71.3\\%\\) visual token reduction ratio with only \\(1.4\\%\\) performance drop, whereas previous state-of-the-art methods show at best a \\(4.4\\%\\) performance drop at an equal reduction ratio. Our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.886, + 0.908, + 0.902 + ], + "angle": 0, + "content": "- We propose a novel visual token pruning metric that does" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.104, + 0.092, + 0.482, + 0.121 + ], + "angle": 0, + "content": "not rely on attention scores, ensuring compatibility with FlashAttention, and empirically validate its effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.122, + 0.483, + 0.167 + ], + "angle": 0, + "content": "- We introduce a new clustering algorithm aimed at reducing visual redundancy and show its superiority over other clustering algorithms for visual token reduction." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.168, + 0.483, + 0.288 + ], + "angle": 0, + "content": "- We show that combining pruning with clustering-based merging surpasses either technique alone for visual token reduction. By integrating our pruning and clustering algorithms, we propose a novel approach, PACT, and demonstrate that it outperforms previous and concurrent works [3, 6, 9, 30, 44]. 
The codebase used to obtain the results in this study is available at https://github.com/orailix/PACT/tree/main." + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.122, + 0.483, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.304, + 0.229, + 0.32 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.33, + 0.308, + 0.346 + ], + "angle": 0, + "content": "2.1. Visual language models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.352, + 0.483, + 0.641 + ], + "angle": 0, + "content": "Since the introduction of BLIP-2 [28], the use of a visual encoder followed by a connector that feeds visual vectors to the language model has become the standard architecture for Visual Language Models (VLMs) [7, 17, 50]. Recent models [10, 27, 49] have enhanced VLM architecture with high-resolution handling, which is necessary for document understanding tasks [13, 23]. LLaVA-OneVision [27] divides images into \\(384 \\times 384\\) crops, encodes each part with SigLIP [54], and uses bilinear interpolation to reduce token count up to 8,748 tokens. InternVL2 [10] splits images into \\(448 \\times 448\\) tiles, processing up to 40 tiles per image with InternViT [10], and applies pixel shuffle to reduce the number of visual tokens, producing up to 10,240 tokens. Qwen-VL2 [49] uses 2D Rotary Positional Embeddings for dynamic resolution support and merges adjacent tokens via an MLP layer, yet still requires over 10,000 tokens for high resolution images. While these models apply token reduction by merging adjacent tokens to preserve structure, they do not address token irrelevance or redundancy, limiting efficiency." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.652, + 0.303, + 0.667 + ], + "angle": 0, + "content": "2.2. 
Visual token reduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.675, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Reducing the number of visual tokens in Vision Transformers (ViT) has been a key focus of the research community for several years. EViT [29] identifies and merges irrelevant tokens by relying on the attention scores between the class token ([CLS]) and visual tokens. ToME [6] proposed a simple yet effective approach that iteratively merges similar tokens throughout the ViT layers. Building on these ideas, recent efforts have extended visual token reduction techniques to VLMs. LaVIT [21] used the Gumbel-Softmax [20] to train a mask that selects tokens for retention, merging discarded tokens into retained ones via additional attention layers. LLaVA-PruMerge [44] accelerates LLAVA 1.5 [31] by leveraging the attention scores between the [CLS] token and visual tokens in the last layer of the ViT encoder to decide which tokens to retain, while HiRED [3] refines" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.591 + ], + "angle": 0, + "content": "this approach by allocating token budgets based on attention from earlier layers. However, both these methods are only applicable to architectures where a ViT is used and a [CLS] token is added to the input visual sequence, making them incompatible with the majority state-of-the-art VLMs, which do not use a [CLS] token. Moreover, both methods attribute scores to tokens at the output of the visual encoder, but recent VLMs merge adjacent visual tokens before passing them to the language model. It is unclear how to attribute pre-merging scores to the resulting tokens, making LLaVA-PruMerge and HiRED inapplicable. We note that LLaVA-PruMerge mitigates information loss by merging pruned tokens with retained ones. However, it does not merge similar retained tokens; therefore, it does not address visual redundancy, a typical limitation of pruning-based methods. 
TRIM [46] prunes tokens based on similarity with pooled text from CLIP [42]. However, as TRIM relies on textual information for pruning, it is less suitable for multi-turn conversations where, in practice, visual tokens would be pruned solely based on the text information available during the image's forward pass, potentially losing crucial information required to answer subsequent prompts. FastV [9] evaluates token importance via average attention scores, which is not compatible with FlashAttention, adding computational overhead for recent VLMs. VTW [30] removes tokens in deeper layers. While this method shows promising results, its reduction of computational costs is limited as visual tokens are only withdrawn in later layers. These previous methods address only one of two issues: the presence of unimportant tokens or visual redundancy. In this work, we introduce PACT, a novel approach that tackles both issues simultaneously by pruning irrelevant tokens and merging visually redundant ones." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.604, + 0.605, + 0.619 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.909, + 0.901 + ], + "angle": 0, + "content": "In this section, we present PACT, a method that aims to reduce VLMs inference time and memory usage by pruning unimportant tokens and merging visually redundant ones at an early layer \\(L\\) of the language model. PACT consists of three steps: First, unimportant tokens are identified. Next, the remaining tokens are clustered. Finally, tokens in each cluster, along with sufficiently close tokens that were initially discarded, are merged. PACT operates within a selected layer \\(L\\) of the language model and is applicable in scenarios where visual tokens are fed into the language model, regardless of the architecture of the visual encoder or connector. The three-step process of PACT is illustrated in Figure 1. 
We denote the hidden states at layer \\(L\\) by \\(\\mathbf{H} \\in \\mathbb{R}^{n \\times d}\\), where \\(n\\) is the number of visual tokens and \\(d\\) is the dimensionality of the hidden states. We denote by \\(\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}\\) the key and query matrices for the visual tokens at layer \\(L\\), where \\(n_h\\) represents the number of attention heads and \\(d_h\\) is the dimensionality of each attention heads." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.089, + 0.865, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.295, + 0.908, + 0.352 + ], + "angle": 0, + "content": "Figure 1. Simplified illustration of PACT. This figure illustrates the three-step process of PACT: (1) First, EUTI is used to prune visual tokens deemed unimportant; (2) Then, DBDPC is applied to cluster the remaining tokens, ensuring that the distance between each token and its corresponding cluster center is smaller than the cutoff distance; (3) Finally, initially pruned tokens that are close to cluster centers are reintegrated, and the elements within each cluster are merged to form the reduced set of visual tokens." 
+ }, + { + "type": "title", + "bbox": [ + 0.093, + 0.377, + 0.222, + 0.392 + ], + "angle": 0, + "content": "Algorithm 1 EUTI" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.398, + 0.482, + 0.414 + ], + "angle": 0, + "content": "Input: Hidden states \\(\\mathbf{H} \\in \\mathbb{R}^{n \\times d}\\); key and query matrices" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.414, + 0.44, + 0.429 + ], + "angle": 0, + "content": "\\(\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}\\); pruning percentage \\(\\lambda \\in [0,1]\\)" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.43, + 0.473, + 0.444 + ], + "angle": 0, + "content": "Output: Sets of important and unimportant visual tokens" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.445, + 0.397, + 0.459 + ], + "angle": 0, + "content": "Step 1: Calculate the global query vector" + }, + { + "type": "equation", + "bbox": [ + 0.109, + 0.459, + 0.253, + 0.475 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.476, + 0.483, + 0.503 + ], + "angle": 0, + "content": "Step 2: Compute the importance score for each visual token" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.505, + 0.261, + 0.519 + ], + "angle": 0, + "content": "for all \\(i = 1,\\dots ,n\\) do" + }, + { + "type": "equation", + "bbox": [ + 0.134, + 0.519, + 0.449, + 0.539 + ], + "angle": 0, + "content": "\\[\ns _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\left\\| \\mathbf {h} _ {i} \\right\\| _ {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.538, + 0.162, + 0.55 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.552, + 0.483, + 0.58 + ], + "angle": 0, + "content": "Step 3: Define sets of 
important and unimportant tokens" + }, + { + "type": "equation", + "bbox": [ + 0.109, + 0.582, + 0.36, + 0.597 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.11, + 0.597, + 0.371, + 0.613 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {u n i m p o r t a n t}} = \\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.613, + 0.298, + 0.629 + ], + "angle": 0, + "content": "Return \\(S_{\\text{important}}\\), \\(S_{\\text{unimportant}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.663, + 0.483, + 0.741 + ], + "angle": 0, + "content": "tion head. For simplicity, we omit the layer index in the notation. We denote the position index of a token by a subscript, while the attention head is indicated by a superscript. For instance, \\(\\mathbf{k}_i^{(j)}\\) represents the key vector corresponding to the \\(i\\)-th visual token and the \\(j\\)-th attention head." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.756, + 0.388, + 0.772 + ], + "angle": 0, + "content": "3.1. Unimportant tokens identification" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.483, + 0.901 + ], + "angle": 0, + "content": "A straightforward approach to identifying unimportant tokens at a certain layer \\( L \\) of the used language model is to define the importance of each token as the total attention score that a given token receives from all other tokens [9]. However, this method has three main drawbacks. First, current VLMs utilize FlashAttention [12], which does not support outputting attention scores. Secondly, attention scores are computed with masking, which introduces" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.378, + 0.907, + 0.635 + ], + "angle": 0, + "content": "biases. 
Tokens at the end of a sequence tend to receive lower average attention scores since fewer tokens attend to them. Calculating the average attention score for each token based solely on the tokens that attend to it can mitigate this masking effect but introduces a new bias: end-of-sequence tokens may exhibit higher scores as they receive attention mainly from nearby tokens. This leads to either earlier or later tokens being pruned more frequently, as shown in Fig. 2. Such positional bias should be avoided, as pruning should depend solely on the information that visual tokens hold, not their position. Finally, relying only on keys and queries at a single layer to determine an importance metric may fail to fully capture the significance of visual tokens across all layers of the language model, mainly because each self-attention layer focuses on different aspects of the visual tokens. To address this, we propose an importance metric that incorporates the accumulated in" + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.666, + 0.691, + 0.745 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.76, + 0.708, + 0.786 + ], + "angle": 0, + "content": "(a) Average attention scores as a function of Position IDs." + }, + { + "type": "image", + "bbox": [ + 0.725, + 0.667, + 0.886, + 0.745 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.76, + 0.905, + 0.8 + ], + "angle": 0, + "content": "(b) Average attention scores relative to non-masked tokens as a function of Position IDs." + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.814, + 0.907, + 0.897 + ], + "angle": 0, + "content": "Figure 2. Illustration of the bias induced by the use of the average attention scores across visual tokens as a pruning metric. In (a), averaging attention over all tokens favors earlier tokens, leading to pruning later tokens more frequently. 
In (b), averaging only over attending tokens reverses the bias, leading to earlier tokens being pruned more often." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.091, + 0.468, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.271, + 0.483, + 0.3 + ], + "angle": 0, + "content": "Figure 3. Illustration of visual token norm statistics at the fourth layer of LLaVA-OneVision-7B." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.323, + 0.485, + 0.566 + ], + "angle": 0, + "content": "formation from the hidden states and the layer-specific information from the keys and queries at an early layer \\( L \\). We refer to this method as Efficient Unimportant Tokens Identification (EUTI). We speculate that the norm of hidden states can provide critical information about the importance of each visual token, as they reflect how much information a particular token carries through the network. Figure 3 presents statistics on the hidden state norms of visual tokens at the fourth layer of LLaVA-OneVision-7B, indicating a high variance. This variance suggests that certain visual tokens accumulate more information through residual connections and may therefore be more important for subsequent calculations. 
To leverage information from both hidden state norms and the key and query vectors, we first compute a global query vector \\( \\mathbf{Q}_{\\mathrm{global}} \\) as the average of all query vectors across visual tokens:" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.573, + 0.483, + 0.612 + ], + "angle": 0, + "content": "\\[\n\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.619, + 0.483, + 0.65 + ], + "angle": 0, + "content": "This vector represents the overall query information requested by visual tokens at layer \\(L\\) across all attention" + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.67, + 0.468, + 0.842 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.855, + 0.483, + 0.898 + ], + "angle": 0, + "content": "Figure 4. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-OneVision-7B before the application of rotary embeddings." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.906, + 0.183 + ], + "angle": 0, + "content": "heads. The importance score for each visual token is then computed by first taking the dot product between its key and the global query for each attention head. A softmax is applied across visual tokens within each attention head, followed by averaging across attention heads. 
The final score is obtained by scaling the result with the hidden state norm:" + }, + { + "type": "equation", + "bbox": [ + 0.558, + 0.202, + 0.906, + 0.245 + ], + "angle": 0, + "content": "\\[\ns _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\| \\mathbf {h} _ {i} \\| _ {2} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.25, + 0.906, + 0.31 + ], + "angle": 0, + "content": "Then, we divide the visual tokens into important and unimportant tokens, using a parameter \\(\\lambda \\in [0,1]\\) to control the percentage of tokens deemed unimportant. The two sets are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.32, + 0.907, + 0.339 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.577, + 0.344, + 0.906, + 0.362 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {u n i m p o r t a n t}} = \\left\\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.366, + 0.906, + 0.472 + ], + "angle": 0, + "content": "Unimportant tokens can be pruned, or the resulting sets can be combined with a clustering algorithm to further reduce the number of visual tokens, as we will show in the next section. The full EUTI algorithm is illustrated in Algorithm 1. We note that in the case where Rotary Embeddings are used [47], we use the keys and queries before their application to avoid any positional bias." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.479, + 0.874, + 0.496 + ], + "angle": 0, + "content": "3.2. 
Clustering-based merging of visual tokens" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.501, + 0.906, + 0.622 + ], + "angle": 0, + "content": "Distance Bounded Density Peak Clustering Relying solely on the importance scores presented above to prune unimportant tokens can lead to a significant reduction in visual tokens, retaining only important ones. However, redundant information may still be present across retained visual tokens. Therefore, we propose merging the redundant visual tokens using a clustering algorithm. We desire our clustering algorithm to have the following characteristics:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.624, + 0.719, + 0.638 + ], + "angle": 0, + "content": "(a) Low computational time." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.639, + 0.905, + 0.669 + ], + "angle": 0, + "content": "(b) Avoid assigning points that are far from each other, in terms of feature similarity, into the same cluster." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.624, + 0.905, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.683, + 0.906, + 0.738 + ], + "angle": 0, + "content": "Table 1. Throughput ratio, reduction ratio, and GPU memory usage for PACT, FastV, VTW, and ToME applied to LLaVA-OneVision-7B. Results are reported at a \\(98.6\\%\\) Approach-to-Reference Metric Ratio." + }, + { + "type": "table", + "bbox": [ + 0.516, + 0.75, + 0.906, + 0.798 + ], + "angle": 0, + "content": "
No reductionPACT (ours)FastVVTWToME
Reduction Ratio0%71.3%50%25%40%
LLM Throughput Ratio100%225%165%160%137%
GPU Maximum Memory Consumption (GB)27.419.0530.419.221.4
" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Condition (b) ensures that outliers are not assigned to distant cluster centers, as we speculate that these outliers contain important information and should only be merged with nearby outliers or remain as single points in separate clusters. Condition (b) also guarantees that points in each cluster will be relatively close to each other, which minimizes" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.242 + ], + "angle": 0, + "content": "information loss when assigning a single vector as their representative. The Density Peaks Clustering (DPC) algorithm [5] is appealing in this context because it satisfies condition (a), unlike iterative clustering algorithms like k-means [2]. However, DPC does not satisfy condition (b) as it can form large clusters where boundary points may be distant from each other. The same issue arises with other algorithms such as DBSCAN [14]. Therefore, we propose a new clustering algorithm, which we call Distance Bounded Density Peaks Clustering (DBDPC)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.243, + 0.484, + 0.336 + ], + "angle": 0, + "content": "DBDPC takes as input a set of vectors \\(\\{\\mathbf{u}_i\\in \\mathbb{R}^{d_1}\\}_{i = 1}^q\\) where \\(q,d_{1}\\in \\mathbb{N}^{+}\\), and outputs a set of clusters. Our algorithm's output depends on two parameters, the cutoff distance \\(d_c\\in \\mathbb{R}^+\\) and a normalization factor \\(d_{n}\\in \\mathbb{R}^{+}\\), as well as a distance function \\(d:\\mathbb{R}^{d_1}\\times \\mathbb{R}^{d_1}\\to \\mathbb{R}^+\\). 
We define the distance between two vectors \\(\\mathbf{u}_i\\) and \\(\\mathbf{u}_j\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.166, + 0.346, + 0.484, + 0.378 + ], + "angle": 0, + "content": "\\[\nd _ {i j} = d \\left(\\mathbf {u} _ {i}, \\mathbf {u} _ {j}\\right) = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ {2} \\| \\mathbf {u} _ {j} \\| _ {2}} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.389, + 0.368, + 0.404 + ], + "angle": 0, + "content": "Then the local density \\(\\rho_{i}\\) is calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.224, + 0.417, + 0.483, + 0.45 + ], + "angle": 0, + "content": "\\[\n\\rho_ {i} = \\sum_ {j} e ^ {- d _ {i j} / d _ {n}} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.462, + 0.484, + 0.794 + ], + "angle": 0, + "content": "We process the \\(\\mathbf{u}_i\\) vectors from highest to lowest \\(\\rho\\) values and designate a vector as a cluster center if its minimum distance from already selected centers is greater than \\(d_c\\). Each vector \\(\\mathbf{u}_i\\) is then assigned to the cluster of the closest center. Our algorithm guarantees that the distance from each vector to its cluster center is less than \\(d_c\\), thereby satisfying condition (b) stated above. The full DBDPC algorithm is detailed in Algorithm 2. The center identification process in DBDPC ensures that inter-cluster distances are upper-bounded by \\(2d_c \\times (2 - d_c)\\) while distances between cluster centers are lower-bounded by \\(d_c\\), which we formally prove in Appendix B. We note that several parts of our algorithm are presented as for-loops for clarity. However, all computations are parallelizable on GPU, as there are no dependencies between the elements of each loop, except for the part where we select cluster centers. 
For this part, we use a recursive algorithm that efficiently identifies an initial set of centers and discarded vectors, thereby reducing the number of vectors to be processed. We explain this in detail in Appendix D. For a comparison between DBDPC and DPC, as well as a qualitative comparison with other clustering algorithms, refer to Appendix C." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.796, + 0.483, + 0.809 + ], + "angle": 0, + "content": "Which vectors should be used for distance calculation?" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.811, + 0.484, + 0.901 + ], + "angle": 0, + "content": "As previously discussed, the DBDPC algorithm operates on a set of vectors that are used for distance calculation. To achieve effective clustering, the dot product between these vectors needs to accurately reflect the similarity between the corresponding visual tokens. Fortunately, transformers address this issue through the QKV self-attention mechanism." + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.092, + 0.664, + 0.106 + ], + "angle": 0, + "content": "Algorithm 2 DBDPC" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.112, + 0.907, + 0.143 + ], + "angle": 0, + "content": "Input: Cutoff distance \\(d_{c} \\in \\mathbb{R}^{+}\\), normalization factor \\(d_{n} \\in \\mathbb{R}^{+}\\), set of vectors \\(\\{\\mathbf{u}_i \\in \\mathbb{R}^{d_1}\\}_{i=1}^q\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.143, + 0.906, + 0.172 + ], + "angle": 0, + "content": "Output: Cluster center indices \\( C_{\\text{centers}} \\), element indices in each cluster \\( C_{\\text{elements}} \\)" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.173, + 0.692, + 0.188 + ], + "angle": 0, + "content": "for all pairs \\((\\mathbf{u}_i,\\mathbf{u}_j)\\) do" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.188, + 0.699, + 0.207 + ], + "angle": 0, + "content": "\\[\nd _ {i j} = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ 
{2} \\| \\mathbf {u} _ {j} \\| _ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.206, + 0.586, + 0.217 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.22, + 0.67, + 0.234 + ], + "angle": 0, + "content": "for all vectors \\(\\mathbf{u}_i\\) do" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.234, + 0.693, + 0.252 + ], + "angle": 0, + "content": "\\[\n\\rho_ {i} = \\sum_ {j = 1} ^ {q} e ^ {- d _ {i j} / d _ {n}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.252, + 0.586, + 0.262 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.265, + 0.905, + 0.295 + ], + "angle": 0, + "content": "Sort vectors by \\(\\rho_{i}\\) in descending order, obtaining indices \\([i_1,i_2,\\dots ,i_q]\\)" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.295, + 0.825, + 0.31 + ], + "angle": 0, + "content": "Initialize \\(C_{\\mathrm{centers}} = \\{i_1\\}\\) \\(C_{\\mathrm{elements}} = \\{i_1:\\emptyset \\}\\)" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.311, + 0.766, + 0.324 + ], + "angle": 0, + "content": "for all indices \\(i_k\\) in sorted order do" + }, + { + "type": "text", + "bbox": [ + 0.556, + 0.326, + 0.752, + 0.341 + ], + "angle": 0, + "content": "if \\(\\min_{s\\in C_{\\mathrm{centers}}}d_{i_ks} > d_c\\) then" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.341, + 0.742, + 0.356 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {c e n t e r s}} = C _ {\\text {c e n t e r s}} \\cup \\left\\{i _ {k} \\right\\}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.582, + 0.356, + 0.689, + 0.371 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {e l e m e n t s}} \\left[ i _ {k} \\right] = \\emptyset\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.556, + 0.372, + 0.6, + 0.383 + ], + "angle": 0, + "content": "end if" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.387, + 0.585, + 0.398 + ], + "angle": 0, + "content": "end for" + }, + { + 
"type": "text", + "bbox": [ + 0.532, + 0.401, + 0.658, + 0.413 + ], + "angle": 0, + "content": "for all indices \\(i\\) do" + }, + { + "type": "text", + "bbox": [ + 0.556, + 0.417, + 0.717, + 0.431 + ], + "angle": 0, + "content": "\\(s_i = \\text{argmin}_{s \\in C_{\\text{centers}}} d_{is}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.557, + 0.431, + 0.77, + 0.447 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {e l e m e n t s}} [ s _ {i} ] = C _ {\\text {e l e m e n t s}} [ s _ {i} ] \\cup \\{i \\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.447, + 0.586, + 0.458 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.462, + 0.694, + 0.476 + ], + "angle": 0, + "content": "Return \\(C_{\\mathrm{centers}}\\) \\(C_{\\mathrm{elements}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.507, + 0.907, + 0.567 + ], + "angle": 0, + "content": "Specifically, the key vectors \\( K \\) provide a meaningful representation of each token, tailored for dot product similarity. Therefore, we will use the key vectors in the DBDPC algorithm. Formally, we have:" + }, + { + "type": "equation", + "bbox": [ + 0.599, + 0.579, + 0.907, + 0.597 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {c e n t e r s}}, C _ {\\text {e l e m e n t s}} = \\mathrm {D B D P C} \\left(K ^ {\\prime}\\right) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.608, + 0.906, + 0.639 + ], + "angle": 0, + "content": "where \\( K' = \\{\\mathbf{u}_i \\in K \\mid i \\in S_{\\text{important}}\\} \\) is the subset of keys consisting of elements with indices in \\( S_{\\text{important}} \\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.64, + 0.906, + 0.779 + ], + "angle": 0, + "content": "What about unimportant tokens near cluster centers? Tokens initially deemed unimportant but close enough to cluster centers have a high probability of being mislabeled. 
We add these tokens to the corresponding cluster to limit information loss. Formally, we define a threshold based on a coefficient \\(\\alpha\\), where any token \\(\\mathbf{u}_i\\), initially excluded, is added to the cluster of the closest center \\(s \\in C_{\\text{centers}}\\) if its distance to the center satisfies \\(d_{is} < \\alpha \\cdot d_c\\). Specifically, the new cluster elements set \\(C_{\\text{elements}}^{(s)}\\) is updated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.527, + 0.79, + 0.905, + 0.814 + ], + "angle": 0, + "content": "\\[\nS _ {\\text {a d d e d}} ^ {(s)} = \\left\\{i \\in S _ {\\text {u n i m p o r t a n t}} \\mid s = \\operatorname {a r g m i n} _ {s ^ {\\prime} \\in C _ {\\text {c e n t e r s}}} d _ {i s ^ {\\prime}} \\right. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.711, + 0.813, + 0.832, + 0.828 + ], + "angle": 0, + "content": "and \\(d_{is} < \\alpha \\cdot d_c\\}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.614, + 0.844, + 0.905, + 0.865 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {e l e m e n t s}} ^ {(s)} \\leftarrow C _ {\\text {e l e m e n t s}} ^ {(s)} \\cup S _ {\\text {a d d e d}} ^ {(s)} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Merging Finally, the hidden states corresponding to the elements in each cluster are merged. 
Formally, the merged" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.092, + 0.225, + 0.106 + ], + "angle": 0, + "content": "Algorithm 3 PACT" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.112, + 0.484, + 0.173 + ], + "angle": 0, + "content": "Input: Hidden states \\(\\mathbf{H} = [\\mathbf{h}_1, \\dots, \\mathbf{h}_n] \\in \\mathbb{R}^{n \\times d}\\); key and query matrices \\(\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}\\); position IDs \\(\\mathbf{P} = [p_1, \\dots, p_n]\\); pruning percentage \\(\\lambda \\in [0, 1]\\); cutoff distance \\(d_c > 0\\); tolerance coefficient \\(\\alpha > 0\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.173, + 0.465, + 0.188 + ], + "angle": 0, + "content": "Output: Merged hidden states \\(\\mathbf{H}'\\); new position IDs \\(\\mathbf{P}'\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.189, + 0.468, + 0.203 + ], + "angle": 0, + "content": "Step 1: Identify important and unimportant tokens" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.204, + 0.406, + 0.219 + ], + "angle": 0, + "content": "\\(S_{\\mathrm{important}}\\) \\(S_{\\mathrm{unimportant}}\\gets \\mathrm{EUTI}(\\mathbf{H},\\mathbf{K},\\mathbf{Q},p)\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.219, + 0.436, + 0.232 + ], + "angle": 0, + "content": "Step 2: Cluster important tokens with DBDPC" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.233, + 0.319, + 0.248 + ], + "angle": 0, + "content": "\\(\\mathbf{K}^{\\prime}\\gets \\{\\mathbf{k}_{i}\\in \\mathbf{K}\\mid i\\in S_{\\mathrm{important}}\\}\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.249, + 0.359, + 0.264 + ], + "angle": 0, + "content": "\\(C_{\\mathrm{centers}}\\) \\(C_{\\mathrm{elements}}\\gets \\mathrm{DBDPC}(\\mathbf{K}^{\\prime},d_{c})\\)" + }, + { + "type": "ref_text", + "bbox": [ 
+ 0.108, + 0.265, + 0.482, + 0.292 + ], + "angle": 0, + "content": "Step 3: Assign unimportant tokens to sufficiently close clusters." + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.294, + 0.272, + 0.309 + ], + "angle": 0, + "content": "for all \\(i\\in S_{\\mathrm{unimportant}}\\) do" + }, + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.311, + 0.256, + 0.324 + ], + "angle": 0, + "content": "\\(s_i\\gets \\operatorname{argmin}_s d_{is}\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.325, + 0.262, + 0.339 + ], + "angle": 0, + "content": "if \\(d_{is_i} < \\alpha \\cdot d_c\\) then" + }, + { + "type": "ref_text", + "bbox": [ + 0.159, + 0.34, + 0.331, + 0.358 + ], + "angle": 0, + "content": "\\(C_{\\mathrm{elements}}^{(s_i)} \\gets C_{\\mathrm{elements}}^{(s_i)} \\cup \\{i\\}\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.134, + 0.359, + 0.176, + 0.37 + ], + "angle": 0, + "content": "end if" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.373, + 0.162, + 0.385 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.387, + 0.471, + 0.402 + ], + "angle": 0, + "content": "Step 4: Merge hidden states and assign position IDs" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.403, + 0.252, + 0.417 + ], + "angle": 0, + "content": "for all \\(s\\in C_{\\mathrm{centers}}\\) do" + }, + { + "type": "ref_text", + "bbox": [ + 0.134, + 0.418, + 0.321, + 0.439 + ], + "angle": 0, + "content": "\\(\\mathbf{h}_s^{\\prime}\\gets \\frac{1}{|C_{\\mathrm{elements}}^{(s)}|}\\sum_{i\\in C_{\\mathrm{elements}}^{(s)}}\\mathbf{h}_i\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.134, + 0.44, + 0.191, + 0.454 + ], + "angle": 0, + "content": "\\(p_s^\\prime \\gets p_s\\)" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.455, + 0.162, + 0.466 + ], + "angle": 0, + "content": "end for" + }, + { + "type": "ref_text", + "bbox": [ + 0.109, + 0.468, + 0.21, + 0.482 + ], + "angle": 0, + "content": "Return H', P'" + }, + { + "type": 
"list", + "bbox": [ + 0.092, + 0.112, + 0.484, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.514, + 0.295, + 0.529 + ], + "angle": 0, + "content": "hidden states are computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.105, + 0.538, + 0.483, + 0.59 + ], + "angle": 0, + "content": "\\[\n\\mathbf {H} ^ {\\prime} = \\left\\{\\frac {1}{| C _ {\\text {e l e m e n t s}} ^ {(j)} |} \\sum_ {i \\in C _ {\\text {e l e m e n t s}} ^ {(j)}} \\mathbf {h} _ {i} \\mid C _ {\\text {e l e m e n t s}} ^ {(j)} \\in C _ {\\text {e l e m e n t s}} \\right\\} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.484, + 0.84 + ], + "angle": 0, + "content": "Defining the position IDs Accurately assigning position IDs to each vector in the new hidden states \\(\\mathbf{H}^{\\prime}\\) is crucial, especially for models using Rotary embeddings, as these IDs determine the input image structure or the temporal dependencies of the input video. In order to achieve a low statistical discrepancy compared to regular inference, we assign the position ID for each vector from \\(H^{\\prime}\\) as its corresponding cluster center. The full PACT pipeline is shown in Algorithm 3. When Rotary Embeddings are used, DBDPC uses the keys after these embeddings are applied, whereas EUTI uses the keys and queries before applying these embeddings. For clarity, we omit this detail in Algorithm 3. We also note that both DBDPC and EUTI, as well as PACT, do not use textual tokens. Therefore, visual token reduction is performed independently of the textual context, making our method well-suited for multi-turn conversations." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Proportional attention Merging tokens reduces their influence in the attention mechanism and can therefore deteriorate performance if many important tokens are merged together. 
To mitigate this, we employ proportional attention." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.089, + 0.71, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.713, + 0.09, + 0.905, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.239, + 0.907, + 0.281 + ], + "angle": 0, + "content": "Figure 5. Comparison between PACT, DBDPC, and EUTI against other visual token reduction methods across various reduction ratios applied on LLaVA-OneVision-7B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.307, + 0.906, + 0.352 + ], + "angle": 0, + "content": "Let \\( K \\), \\( Q \\), and \\( V \\) denote the keys, queries, and values at a layer \\( L' \\), where \\( L' \\geq L \\). For each attention head \\( j \\), the attention scores are calculated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.543, + 0.359, + 0.906, + 0.395 + ], + "angle": 0, + "content": "\\[\nA ^ {(j)} = \\operatorname {s o f t m a x} \\left(\\frac {Q ^ {(j)} K ^ {(j) \\top}}{\\sqrt {d _ {l ^ {\\prime}}}} + \\log \\mathbf {W} + \\mathbf {B}\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.403, + 0.907, + 0.599 + ], + "angle": 0, + "content": "where \\( d_{l'} \\) is the dimensionality of the query for each attention head. Here, \\( \\mathbf{W} \\) is a matrix representing the weight of each token, and \\( \\mathbf{B} \\) is the attention mask. Specifically, for visual tokens, \\( w_{i_0,i_1} \\) represents the size of the cluster corresponding to token \\( i_1 \\), for any value of \\( i_0 \\). For each textual token at position \\( t \\), \\( w_{i_0,t} = 1 \\), as they remain unmerged, retaining a weight of one. By scaling the attention scores based on \\( \\mathbf{W} \\), the model effectively treats each visual token as if it represents multiple tokens. 
We note that when using proportional attention, we use PyTorch's scaled dot-product attention, which produces similar results to the official FlashAttention implementation while supporting custom masks." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.6, + 0.906, + 0.751 + ], + "angle": 0, + "content": "Selecting the layer \\( L \\) for token reduction: To ensure maximum computational gain, we must choose an early layer \\( L \\) for visual token reduction. However, we also require that the keys at the selected layer are not too similar, allowing for effective clustering and pruning. Thus, we select the earliest layer where the maximum distance between keys is sufficiently high. Figure 4 shows that in the initial layers of LLaVA-OneVision-7B, the keys corresponding to visual tokens are quite similar, indicating a lack of distinctive features necessary for effective pruning and clustering." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.764, + 0.646, + 0.78 + ], + "angle": 0, + "content": "4. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.789, + 0.701, + 0.803 + ], + "angle": 0, + "content": "4.1. Evaluation datasets" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.906, + 0.902 + ], + "angle": 0, + "content": "We evaluate the effectiveness of PACT using diverse benchmarks, similar to those used for LLaVA-OneVision-7B, covering single-image, multi-image, and video tasks. We use AI2D [22], TextVQA [45], ChartQA [37], DocVQA [38], and InfographicVQA [39] to assess PACT's ability to reduce visual tokens while maintaining performance" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.937 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.094, + 0.089, + 0.482, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.224, + 0.483, + 0.266 + ], + "angle": 0, + "content": "Figure 6. 
Comparison between PACT and other visual token reduction methods across various reduction ratios applied on Qwen2-VL-7B-Instruct." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.292, + 0.483, + 0.504 + ], + "angle": 0, + "content": "in text-rich documents. To test reasoning across multiple disciplines, we use MME [15], MMBench [32], MMVet [51], MathVerse [57], MathVista [34], MMMU [53], MMStar [8], and ScienceQA [33]. Additionally, Vibe-Eval [40], MM-LiveBench [26], and LLaVA-Bench-Wilder [25] evaluate its robustness in real-world scenarios and visual chat contexts. We use LLaVA-Interleave Bench [25] and MuirBench [48] to examine PACT's efficiency in token reduction while preserving inter-image reasoning. To assess performance in video comprehension tasks, we use ActivityNet-QA [52], MLVU [58], VideoMME [16], EgoSchema [36], and PerceptionTest [41]. Finally, Video-ChatGPT [35] evaluates the method's effectiveness in dialogue-based video interaction." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.513, + 0.258, + 0.529 + ], + "angle": 0, + "content": "4.2. Evaluation setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.535, + 0.484, + 0.701 + ], + "angle": 0, + "content": "In our comparison, we include approaches where the reduction is applied at a single layer, similar to PACT, such as FastV and clustering-based visual token reduction. For these approaches, we refer to the reduction ratio as the relative reduction in the number of visual tokens, defined as \\(1 - \\frac{\\text{number of visual tokens after reduction}}{\\text{number of visual tokens before reduction}}\\). For all these approaches, we use the same value of \\(L\\) and vary hyperparameters to test across different reduction ratios. 
For methods that use progressive token reduction, like ToME [6], or apply reduction after the visual encoder, as PruMerge and HiReD, or when the reduction ratio cannot be controlled at a fixed" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.72, + 0.482, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.855, + 0.483, + 0.896 + ], + "angle": 0, + "content": "Figure 7. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on InternVL2-8B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.578 + ], + "angle": 0, + "content": "layer, such as VTW, we adjust the parameters of these approaches to achieve the same average number of visual tokens across all layers as the one-layer reduction methods for a given reduction ratio. When evaluating clustering algorithms for visual token reduction, we apply proportional attention, as it consistently improves performance across all clustering algorithms, especially at high reduction ratios. Additionally, it is crucial to correctly assign position IDs to the resulting reduced set of visual tokens. Details on the assignment strategy are presented in Appendix E. When reporting processing time or throughput, we take into account the total time required by both the language model and the reduction algorithm per input element. In the next section, we base our comparison on a metric called the Approach-to-Reference Metric Ratio, defined as the average of the ratio of the metric of the tested approach to the metric obtained without visual token reduction across all test datasets. Formally we have Approach-to-Reference Metric Ratio \\( = \\frac{1}{N} \\sum_{i=1}^{N} \\frac{\\text{Metric with reduction}(i)}{\\text{Metric no reduction}(i)} \\) where \\( N \\) is the total number of test datasets. This metric indicates how much of the original model capacity is retained. 
It is important to note that when using ToME for visual token reduction, a reduction ratio greater than 50% can't be achieved if the number of visual tokens is reduced by a fixed amount in each layer, as suggested in [6]. Instead, we use a scheduler to achieve higher reduction ratios, which we explain in Appendix F. More details on the hyperparameters used for evaluating PACT are provided in Appendix G. We follow the same dataset splits and metrics used for evaluating LLaVA-OneVision wherever feasible. More details are provided in Appendix H. Note that all experiments were conducted on a single A100 GPU." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.587, + 0.608, + 0.601 + ], + "angle": 0, + "content": "4.3. Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.607, + 0.906, + 0.7 + ], + "angle": 0, + "content": "We compare PACT with FastV [9], VTW [30], ToME [6], PruMerge [44] and HiRED [3] on LLaVA-OneVision7B, InternVL2-8B, Qwen2-VL-7B-Instruct and LLaVA1.6-Mistral-7B. Since HiRED and PruMerge are only applicable to LLaVA-1.6, we exclude them from other comparisons. As shown in figures 5, 6, 7, and 8 PACT con" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.72, + 0.905, + 0.845 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.855, + 0.907, + 0.896 + ], + "angle": 0, + "content": "Figure 8. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on LLaVA-1.6-Mistral-7B." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.907, + 0.145 + ], + "angle": 0, + "content": "Table 2. Comparison of PACT with FastV, VTW, and ToME on LLaVA-OneVision-7B. Algo. Time refers to the average time the algorithm takes per input element, measured in seconds. Proc. 
Time refers to the average time taken by both the language model and the reduction algorithm per input element. Red. Ratio stands for average Reduction Ratio. The Algo. Time for VTW is nearly zero, and thus omitted. The different visual token reduction methods are evaluated at the same reduction ratio as PACT." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.156, + 0.907, + 0.411 + ], + "angle": 0, + "content": "
DatasetNo reductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeMetricProc. TimeAlgo. Time
VideoMME58.50.79257.665.6%0.3690.02157.00.3710.04046.90.29657.00.4170.091
MME15790.5541564.070.2%0.2430.0171576.00.2440.016842.00.2311556.90.3170.084
DocVQA87.21.08884.467.9%0.5190.02684.30.5240.05110.50.44961.90.5760.099
MLVU65.20.79564.766.4%0.3610.02262.90.3690.04054.40.31263.40.4170.092
LLaVA-Interleave64.10.24964.069.7%0.1330.01058.90.1390.00732.40.12350.30.1920.068
ChartQA79.90.67176.568.5%0.3410.01977.00.3420.01616.60.30763.40.4020.082
MMBench80.60.24980.369.3%0.1350.01079.00.1400.00552.40.12579.70.1930.066
MuirBench42.00.38443.167.8%0.1780.01340.40.1780.00934.90.16240.50.2330.072
ScienceQA95.90.23893.869.6%0.1330.01091.60.1370.00680.00.12493.80.1900.066
MMMU49.20.13948.970.4%0.1040.00748.90.1060.00343.50.09348.60.1240.062
AI2D81.50.38281.069.8%0.1860.01379.40.1910.01469.70.17779.70.2440.073
InfographicVQA66.00.89561.964.7%0.4810.02358.60.4830.04024.50.40848.30.6070.130
MMStar62.00.29760.169.7%0.1470.01158.60.1520.00737.20.16560.10.2290.069
ActivityNetQA54.50.92155.170.0%0.4190.02953.70.4250.04236.60.39454.10.5130.203
MM-LiveBench73.14.43471.767.5%3.2120.04764.43.2210.04441.03.08064.23.6070.102
LLaVA-Wilder71.010.1071.570.0%8.2620.03571.08.2630.02548.87.51568.07.9260.085
MathVerse16.80.83116.674.2%0.3610.02116.10.3820.03617.60.30116.50.5590.150
MathVista63.30.44062.070.7%0.2710.01559.50.2720.01638.50.26055.00.3380.071
MMVet58.04.60258.470.4%3.7930.03551.73.7950.03615.73.65247.24.1150.212
Vibe-Eval41.65.15339.171.1%3.7090.03238.23.7140.04712.33.55031.24.3170.095
VideoChatGPT3.252.9723.2567.2%1.8630.0293.221.8660.0401.921.3203.191.9750.205
EgoSchema60.10.81160.166.6%0.3510.02158.70.3530.04444.80.29759.80.3910.091
PerceptionTest52.10.80152.366.9%0.3530.02351.70.3570.04045.00.29651.10.3930.090
TextVQA75.80.69075.067.2%0.3320.02375.50.3360.02911.60.28762.50.3920.087
" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.435, + 0.482, + 0.678 + ], + "angle": 0, + "content": "siently outperforms other methods at both equal reduction ratios and equal throughput across all four models. VTW experiences a significant performance drop for reduction ratios above \\(40\\%\\), indicating that removing all visual tokens is only effective when done in later layers. FastV and ToME struggle at high reduction ratios, while PruMerge and HiRED exhibit degradation even at low reduction ratios. Meanwhile, PACT maintains acceptable performance even at high reduction ratios. Table 2 and Table 3 shows that PACT outperforms other approaches on most of the test datasets when applied on LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct. The same conclusion applies to other models, with detailed results provided in Appendix I. In Tab. 1, we report the reduction ratio, throughput, and maximum GPU memory consumption of the different approaches at an equal Approach-to-Reference Metric Ra" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.706, + 0.287, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.706, + 0.482, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.855, + 0.483, + 0.896 + ], + "angle": 0, + "content": "Figure 9. Comparison of DBDPC and other clustering algorithms for visual token reduction at different reduction ratios on LLaVA-OneVision-7B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.435, + 0.908, + 0.799 + ], + "angle": 0, + "content": "tio of \\(98.6\\%\\) on LLaVA-OneVision-7B. PACT significantly outperforms the other methods, achieving a reduction ratio of \\(71.3\\%\\), a GPU memory reduction of \\(31\\%\\), and a \\(225\\%\\) speedup in the language model's inference time. The per-dataset results used to compute these metrics are shown in Tab. 5. Tab. 
1 also indicates that when using FastV, the maximum GPU memory consumption is relatively high due to the costly computation of attention scores. We further compare DBDPC against agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14], with results presented in Fig. 9. The graphs reveal that DBDPC consistently outperforms other clustering algorithms for visual token reduction, exhibiting less performance degradation at equal reduction ratios and demonstrating improved computational efficiency, leading to better throughput. These results validate our hypothesis that, for an effective visual token reduction, it is necessary to ensure that the distances between elements within each cluster do not exceed a predefined threshold. Fig. 5 also shows that EUTI consistently outperforms FastV at equal reduction ratios and is less costly, as it does not require the computation of attention scores. In addition, unlike FastV, EUTI does not introduce a GPU memory overhead1. We provide additional numerical results in Appendix I." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.809, + 0.663, + 0.825 + ], + "angle": 0, + "content": "4.4. Ablation study" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.831, + 0.907, + 0.877 + ], + "angle": 0, + "content": "Fig. 5 shows that PACT consistently outperforms both DBDPC and EUTI across various reduction ratios. This confirms that combining clustering and pruning techniques" + }, + { + "type": "page_footnote", + "bbox": [ + 0.532, + 0.887, + 0.863, + 0.9 + ], + "angle": 0, + "content": "\\(^{1}\\)EUTI achieves roughly the same memory reduction as PACT." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.124, + 0.089, + 0.875, + 0.105 + ], + "angle": 0, + "content": "Table 3. 
Comparison of PACT with FastV, VTW, and ToME applied on Qwen2-VL-7B-Instruct across Various Datasets." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.115, + 0.907, + 0.355 + ], + "angle": 0, + "content": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1654.50.2381666.586.3%0.1101500.00.111709.240.1201610.90.140
DocVQA93.90.51690.577.5%0.29486.60.2988.50.24942.90.350
TextVQA81.80.15580.467.5%0.13279.90.13513.20.11866.20.151
InfographicVQA74.60.47870.669.7%0.27863.30.27321.50.22543.90.299
ChartQA80.80.14576.061.1%0.13569.20.13412.90.12355.10.155
MMBench77.60.07477.151.5%0.07777.10.07476.90.07375.90.080
MuirBench40.70.15941.276.9%0.11340.40.11237.90.11175.80.125
MMMU51.40.10951.272.6%0.09349.30.09245.40.08848.90.105
AI2D79.90.10578.464.2%0.09676.20.09769.00.08776.40.115
MMStar56.00.07254.861.3%0.07251.50.06740.30.06553.80.077
EgoSchema62.10.36061.660.0%0.20760.20.21246.30.19061.20.230
MathVerse25.30.62024.582.2%0.39323.70.39613.90.29618.10.651
MathVista59.20.24957.773.3%0.19556.40.19436.80.16553.50.275
MM Vet24.94.70025.180.3%3.82022.33.8302.73.65016.74.780
Vibe-Eval47.53.20046.185.0%2.31044.32.37513.11.99329.63.620
LLaVA-Interleave35.90.12035.573.7%0.10034.70.10133.20.09635.30.125
MM-LiveBench72.63.97070.777.1%3.04063.03.12039.72.97057.64.450
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.38, + 0.486, + 0.697 + ], + "angle": 0, + "content": "yields better performance than using each approach independently, as the combined method addresses both visual tokens irrelevance and redundancy. We ablate several components of the DBDPC algorithm and present the results in Fig. 10. First, we ablate token merging by selecting the center of each cluster as the representative token instead of merging tokens within each cluster. We also ablate the use of proportional attention. Additionally, we ablate the assignment of position IDs to the reduced set of tokens and experiment with two alternatives: using the mean of position IDs of all elements in each cluster and assigning position IDs sequentially after reordering the reduced set according to the mean of position IDs. Finally, we ablate the use of key vectors in the clustering process and instead use hidden states. Our results show that each ablated component contributes positively to the performance of the DBDPC algorithm. Notably, correctly assigning position IDs to the reduced set is crucial, as these position IDs reflect the structure of input images and the temporal order of input videos. Additionally, proportional attention proves effective at higher reduction ratios, while token merging en" + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.734, + 0.287, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.736, + 0.482, + 0.857 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.868, + 0.483, + 0.896 + ], + "angle": 0, + "content": "Figure 10. Ablation study of DBDPC and EUTI on LLaVA-OneVision-7B." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.38, + 0.909, + 0.742 + ], + "angle": 0, + "content": "hances performance once the reduction ratio exceeds \\(50\\%\\). 
The figure also confirms that keys are better suited for cosine similarity-based distance calculations, as they are naturally used in dot products within the attention mechanism. We perform two separate ablations on Eq. (2) of the EUTI algorithm. The first ablation removes the use of hidden state norms, while the second ablates the use of the global query, which corresponds to using only the hidden state norms. The results in Fig. 10 show that combining both the global query-based score and the norm of hidden states consistently leads to better results than using either metric alone, suggesting that they provide complementary information about the importance of each visual token. Finally, we ablate the pruned token recovery module in PACT by setting \\(\\alpha\\) to zero, with results presented in Fig. 11. The plot shows that reintegrating visual tokens initially deemed unimportant but close enough to a cluster center consistently enhances performance across different reduction ratios, supporting our hypothesis that these tokens were likely mislabeled by the EUTI module. Figure 11 also shows the effect of the choice of the reduction layer on PACT's performance, demonstrating the effectiveness of our reduction layer identification approach. We provide additional numerical results in Appendix J." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.761, + 0.693, + 0.871 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.761, + 0.904, + 0.871 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.883, + 0.904, + 0.898 + ], + "angle": 0, + "content": "Figure 11. Ablation study of PACT on LLaVA-OneVision-7B." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.505, + 0.936 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.09, + 0.21, + 0.106 + ], + "angle": 0, + "content": "5. 
Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.484, + 0.311 + ], + "angle": 0, + "content": "In this work, we presented PACT, a method that addresses both visual token irrelevance and redundancy. PACT is a plug-and-play solution that does not require additional training. It does not rely on textual tokens for visual token reduction, making it well-suited for multi-turn conversations. Additionally, it operates independently of the visual encoder and connector architecture, making it broadly applicable across various Visual Language Models. Our results confirm that the number of visual tokens in Visual Language Models is unnecessarily large and provide valuable insights for effective token reduction. This opens the door for future work in designing more efficient connectors and architectures for VLMs." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.327, + 0.269, + 0.344 + ], + "angle": 0, + "content": "6. Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.351, + 0.484, + 0.435 + ], + "angle": 0, + "content": "This work received financial support from Crédit Agricole S.A. through the research chair with Ecole Polytechnique on Trustworthy and Responsible AI. This work was granted access to the HPC resources of IDRIS under the allocation 2024-AD011014793R1 made by GENCI." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.45, + 0.188, + 0.466 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.474, + 0.483, + 0.516 + ], + "angle": 0, + "content": "[1] Marcel R Ackermann, Johannes Blömer, Daniel Kuntze, and Christian Sohler. Analysis of agglomerative clustering. *Algorithmica*, 69:184-215, 2014. 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.1, + 0.518, + 0.484, + 0.572 + ], + "angle": 0, + "content": "[2] Mohiuddin Ahmed, Raihan Seraj, and Syed Mohammed Shamsul Islam. The k-means algorithm: A comprehensive survey and performance evaluation. 
Electronics, 9(8):1295, 2020. 5, 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.575, + 0.484, + 0.657 + ], + "angle": 0, + "content": "[3] Kazi Hasan Ibn Arif, JinYi Yoon, Dimitrios S Nikolopoulos, Hans Vandierendonck, Deepu John, and Bo Ji. Hired: Attention-guided token dropping for efficient inference of high-resolution vision-language models in resource-constrained environments. arXiv preprint arXiv:2408.10945, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.66, + 0.483, + 0.715 + ], + "angle": 0, + "content": "[4] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.717, + 0.483, + 0.758 + ], + "angle": 0, + "content": "[5] Panthadeep Bhattacharjee and Pinaki Mitra. A survey of density based clustering algorithms. Frontiers of Computer Science, 15:1-27, 2021. 5, 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.76, + 0.484, + 0.815 + ], + "angle": 0, + "content": "[6] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. arXiv preprint arXiv:2210.09461, 2022. 2, 7, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.817, + 0.483, + 0.871 + ], + "angle": 0, + "content": "[7] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.873, + 0.483, + 0.902 + ], + "angle": 0, + "content": "[8] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao," + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.474, + 0.484, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.907, + 0.133 + ], + "angle": 0, + "content": "Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.136, + 0.907, + 0.205 + ], + "angle": 0, + "content": "[9] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. arXiv preprint arXiv:2403.06764, 2024. 2, 3, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.207, + 0.906, + 0.275 + ], + "angle": 0, + "content": "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhang-wei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.278, + 0.906, + 0.346 + ], + "angle": 0, + "content": "[11] Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.349, + 0.906, + 0.389 + ], + "angle": 0, + "content": "[12] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness, 2022. 
1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.392, + 0.906, + 0.46 + ], + "angle": 0, + "content": "[13] Mohamed Dhouib, Ghassen Bettaieb, and Aymen Shabou. Docparser: End-to-endOCR-free information extraction from visually rich documents. In International Conference on Document Analysis and Recognition, pages 155-172. Springer, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.463, + 0.906, + 0.517 + ], + "angle": 0, + "content": "[14] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 5, 8, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.52, + 0.906, + 0.587 + ], + "angle": 0, + "content": "[15] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.591, + 0.906, + 0.659 + ], + "angle": 0, + "content": "[16] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.661, + 0.906, + 0.729 + ], + "angle": 0, + "content": "[17] Tao Gong, Chengqi Lyu, Shilong Zhang, Yudong Wang, Miao Zheng, Qian Zhao, Kuikun Liu, Wenwei Zhang, Ping Luo, and Kai Chen. Multimodal-gpt: A vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.732, + 0.906, + 0.787 + ], + "angle": 0, + "content": "[18] Jiaming Han, Kaixiong Gong, Yiyuan Zhang, Jiaqi Wang, Kaipeng Zhang, Dahua Lin, Yu Qiao, Peng Gao, and Xiangyu Yue. Onellm: One framework to align all modalities with language. arXiv preprint arXiv:2312.03700, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.789, + 0.906, + 0.857 + ], + "angle": 0, + "content": "[19] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.86, + 0.906, + 0.9 + ], + "angle": 0, + "content": "[20] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[21] Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaogiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. arXiv preprint arXiv:2309.04669, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.164, + 0.482, + 0.219 + ], + "angle": 0, + "content": "[22] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European conference on computer vision, pages 235-251. Springer, 2016. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.221, + 0.482, + 0.302 + ], + "angle": 0, + "content": "[23] Geewook Kim, Teakgyu Hong, Moonbin Yim, JeongYeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, and Seunghyun Park. Ocr-free document understanding transformer. In Computer Vision – ECCV 2022, pages 498–517, Cham, 2022. Springer Nature Switzerland. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.304, + 0.482, + 0.358 + ], + "angle": 0, + "content": "[24] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.362, + 0.482, + 0.416 + ], + "angle": 0, + "content": "[25] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.419, + 0.482, + 0.473 + ], + "angle": 0, + "content": "[26] Bo Li, Peiyuan Zhang, Kaichen Zhang, Fanyi Pu, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimodal models, 2024. 7, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.476, + 0.482, + 0.531 + ], + "angle": 0, + "content": "[27] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.533, + 0.482, + 0.587 + ], + "angle": 0, + "content": "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.589, + 0.482, + 0.644 + ], + "angle": 0, + "content": "[29] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.647, + 0.482, + 0.701 + ], + "angle": 0, + "content": "[30] Zhihang Lin, Mingbao Lin, Luxi Lin, and Rongrong Ji. Boosting multimodal large language models with visual tokens withdrawal for rapid inference. arXiv preprint arXiv:2405.05803, 2024. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.703, + 0.482, + 0.743 + ], + "angle": 0, + "content": "[31] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.746, + 0.482, + 0.787 + ], + "angle": 0, + "content": "[32] Yuan Liu, Haodong Duan, Yuanhan Zhang, Songyang Zhang Bo Li, and Wangbo Zhao. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.789, + 0.482, + 0.844 + ], + "angle": 0, + "content": "[33] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.846, + 0.482, + 0.9 + ], + "angle": 0, + "content": "[34] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. 
In In" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.548, + 0.093, + 0.905, + 0.119 + ], + "angle": 0, + "content": "ternational Conference on Learning Representations (ICLR), 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.122, + 0.905, + 0.191 + ], + "angle": 0, + "content": "[35] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024), 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.905, + 0.248 + ], + "angle": 0, + "content": "[36] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36:46212-46244, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.305 + ], + "angle": 0, + "content": "[37] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.907, + 0.362 + ], + "angle": 0, + "content": "[38] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.364, + 0.905, + 0.42 + ], + "angle": 0, + "content": "[39] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.421, + 0.905, + 0.531 + ], + "angle": 0, + "content": "[40] Piotr Padlewski, Max Bain, Matthew Henderson, Zhongkai Zhu, Nishant Relan, Hai Pham, Donovan Ong, Kaloyan Aleksiev, Aitor Ormazabal, Samuel Phua, Ethan Yeo, Eugenie Lamprecht, Qi Liu, Yuqi Wang, Eric Chen, Deyu Fu, Lei Li, Che Zheng, Cyprien de Masson d'Autume, Dani Yogatama, Mikel Artetxe, and Yi Tay. Vibe-eval: A hard evaluation suite for measuring progress of multimodal language models. arXiv preprint arXiv:2405.02287, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.533, + 0.905, + 0.615 + ], + "angle": 0, + "content": "[41] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.617, + 0.905, + 0.687 + ], + "angle": 0, + "content": "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.689, + 0.905, + 0.716 + ], + "angle": 0, + "content": "[43] Erich Schubert. A Triangle Inequality for Cosine Similarity, page 32-44. Springer International Publishing, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.718, + 0.905, + 0.772 + ], + "angle": 0, + "content": "[44] Yuzhang Shang, Mu Cai, Bingxin Xu, Yong Jae Lee, and Yan Yan. Llava-prumerge: Adaptive token reduction for efficient large multimodal models. arXiv preprint arXiv:2403.15388, 2024. 
2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.775, + 0.905, + 0.844 + ], + "angle": 0, + "content": "[45] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.9 + ], + "angle": 0, + "content": "[46] Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multimodal llms. arXiv preprint arXiv:2409.10994, 2024. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.907, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.146 + ], + "angle": 0, + "content": "[47] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.482, + 0.302 + ], + "angle": 0, + "content": "[49] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 
Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.304, + 0.482, + 0.372 + ], + "angle": 0, + "content": "[50] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.375, + 0.482, + 0.429 + ], + "angle": 0, + "content": "[51] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.431, + 0.482, + 0.486 + ], + "angle": 0, + "content": "[52] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, pages 9127–9134, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.488, + 0.482, + 0.598 + ], + "angle": 0, + "content": "[53] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Ren-liang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.6, + 0.482, + 0.64 + ], + "angle": 0, + "content": "[54] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training, 2023. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.643, + 0.482, + 0.697 + ], + "angle": 0, + "content": "[55] Duzhen Zhang, Yahan Yu, Chenxing Li, Jiahua Dong, Dan Su, Chenhui Chu, and Dong Yu. Mm-llms: Recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.7, + 0.482, + 0.74 + ], + "angle": 0, + "content": "[56] Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.742, + 0.482, + 0.81 + ], + "angle": 0, + "content": "[57] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, and Hongsheng Li. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems?, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.812, + 0.482, + 0.88 + ], + "angle": 0, + "content": "[58] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 7" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.936 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.12, + 0.1, + 0.885, + 0.136 + ], + "angle": 0, + "content": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models Supplementary Materials" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.155, + 0.471, + 0.172 + ], + "angle": 0, + "content": "A. 
On the density peaks clustering algorithm" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.181, + 0.484, + 0.452 + ], + "angle": 0, + "content": "Density Peak Clustering (DPC) is a clustering algorithm that identifies cluster centers based on local density and the distance to points with higher density, denoted as \\(\\delta_{i}\\). The density, \\(\\rho_{i}\\), can be measured by counting the number of points within a cutoff distance \\(d_{c}\\) from \\(\\mathbf{u}_{i}\\), or by using a Gaussian function where nearby points contribute more to the density, \\(\\rho_{i} = \\sum_{j}\\exp \\left(-\\left(\\frac{d_{ij}}{d_c}\\right)^2\\right)\\). Points with high \\(\\rho_{i}\\) and \\(\\delta_{i}\\) values are selected as cluster centers. This selection can be done by defining a threshold \\(t\\) and designating points as cluster centers where \\(\\rho_{i}\\cdot \\delta_{i}\\geq t\\times \\max (\\rho_{i}\\cdot \\delta_{i})\\), or by selecting a fixed percentage. Other points are then assigned to the cluster of the nearest higher-density point, iterating from the highest to the lowest density. This process can create clusters of varying shapes, where the maximum distance between elements within a cluster can be extremely large. In extreme cases, the two farthest points in the input data can end up in the same cluster." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.467, + 0.32, + 0.483 + ], + "angle": 0, + "content": "B. DBDPC Characteristics" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.493, + 0.484, + 0.539 + ], + "angle": 0, + "content": "This section aims to prove that DBDPC guarantees that: Each element's distance to its assigned cluster center is at most \\( d_{c} \\) and that all cluster centers are at least \\( d_{c} \\) apart." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.539, + 0.483, + 0.568 + ], + "angle": 0, + "content": "Assume, for contradiction, that at least one of the following statements is false:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.569, + 0.483, + 0.614 + ], + "angle": 0, + "content": "1. There exists an element \\(i\\) assigned to a cluster such that its distance to the cluster center is greater than \\(d_{c}\\), i.e., \\(d_{is} > d_{c}\\)." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.615, + 0.483, + 0.647 + ], + "angle": 0, + "content": "2. There exist two cluster centers \\( s_1, s_2 \\) such that their pairwise distance is at most \\( d_c \\), i.e., \\( d_{s_1s_2} \\leq d_c \\)." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.569, + 0.483, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.666, + 0.483, + 0.696 + ], + "angle": 0, + "content": "Contradiction for Assumption 1 In DBDPC, each element \\(i\\) is assigned to its closest cluster center:" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.71, + 0.358, + 0.733 + ], + "angle": 0, + "content": "\\[\ns_{i} = \\arg \\min_{s\\in C_{\\text{centers}}}d_{is}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.744, + 0.484, + 0.851 + ], + "angle": 0, + "content": "If \\( d_{is} > d_c \\) for a given center \\( s \\), then we have \\( d_{is'} > d_c \\) for all centers. However, in the DBDPC selection process, an element is assigned as a cluster center if its minimum distance to already selected centers is over \\( d_c \\). Thus, \\( i \\) should have been selected as a new cluster center, and its distance to the closest cluster center would be zero, which leads to a contradiction, proving that every element satisfies \\( d_{is} \\leq d_c \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.902 + ], + "angle": 0, + "content": "Contradiction for Assumption 2 Assume, without loss of generality, that \\( s_2 \\) is chosen after \\( s_1 \\). By the center selec" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.157, + 0.822, + 0.172 + ], + "angle": 0, + "content": "tion criterion, a new center \\(s_2\\) is added only if:" + }, + { + "type": "equation", + "bbox": [ + 0.646, + 0.184, + 0.771, + 0.207 + ], + "angle": 0, + "content": "\\[\n\\min_{s\\in C_{\\text{centers}}}d_{s_{2}s} > d_{c}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.216, + 0.905, + 0.261 + ], + "angle": 0, + "content": "If \\( d_{s_1 s_2} \\leq d_c \\), then \\( s_2 \\) shouldn't be selected as a cluster center, which leads to a contradiction. Thus, no two centers can be closer than \\( d_c \\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.262, + 0.906, + 0.352 + ], + "angle": 0, + "content": "Inter-cluster distance upper-bound : Here we will refer to cosine similarity by sim. Let's \\( x \\) and \\( y \\) be two points in the same cluster, and \\( s \\) their cluster center. Since each point \\( \\mathbf{x} \\) is within \\( d_c \\) of its cluster center \\( \\mathbf{s} \\) and the distance used in the DBDPC algorithm is \\( 1 - \\mathrm{sim} \\), we have \\( \\mathrm{sim}(\\mathbf{x},\\mathbf{s})\\geq 1 - d_c \\). 
We have from [43]:" + }, + { + "type": "equation", + "bbox": [ + 0.553, + 0.363, + 0.866, + 0.38 + ], + "angle": 0, + "content": "\\[\n\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq \\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) \\cdot \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) + m - 1,\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.566, + 0.389, + 0.851, + 0.415 + ], + "angle": 0, + "content": "\\[\n\\text {w h e r e} m = \\min \\left\\{\\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) ^ {2}, \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) ^ {2} \\right\\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.421, + 0.807, + 0.436 + ], + "angle": 0, + "content": "Using \\(\\mathrm{sim}(\\mathbf{x},\\mathbf{s}),\\mathrm{sim}(\\mathbf{s},\\mathbf{y})\\geq 1 - d_c\\) we get" + }, + { + "type": "equation", + "bbox": [ + 0.513, + 0.446, + 0.911, + 0.463 + ], + "angle": 0, + "content": "\\[\n\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq (1 - d _ {c}) ^ {2} + (1 - d _ {c}) ^ {2} - 1 = 1 - 2 d _ {c} (2 - d _ {c}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.474, + 0.906, + 0.505 + ], + "angle": 0, + "content": "Finally, converting this back to the distance \\( d(\\mathbf{x}, \\mathbf{y}) = 1 - \\sin(\\mathbf{x}, \\mathbf{y}) \\), we obtain:" + }, + { + "type": "equation", + "bbox": [ + 0.622, + 0.515, + 0.796, + 0.532 + ], + "angle": 0, + "content": "\\[\nd (\\mathbf {x}, \\mathbf {y}) \\leq 2 d _ {c} (2 - d _ {c}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.543, + 0.906, + 0.573 + ], + "angle": 0, + "content": "Therefore, the intra-cluster distance in the DBDPC algorithm is bounded by \\(2d_{c}(2 - d_{c})\\)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.586, + 0.906, + 0.621 + ], + "angle": 0, + "content": "C. 
A comparison between DBDPC and other clustering algorithms" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.907, + 0.87 + ], + "angle": 0, + "content": "Comparison between DBDPC and DPC: We note that, aside from using densities, DBDPC is fundamentally different from DPC. Please refer to Appendix A for a detailed explanation of the DPC algorithm. The center identification process in DBDPC results in two main characteristics with formal proof detailed in Appendix B. First, the distance between each element and its cluster center is below \\( d_{c} \\), which leads to inter-cluster distances being upper-bounded by \\( 2d_{c} \\times (2 - d_{c}) \\). Additionally, the distance between cluster centers is lower-bounded by \\( d_{c} \\). These guarantees do not hold for DPC, leading to two drawbacks. Since intercluster distances are not controlled, merging these vectors may result in merging highly dissimilar vectors, leading to information loss. Also, in high-density regions, the distance between cluster centers becomes too small, making DPC ineffective in addressing information redundancy." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.906, + 0.901 + ], + "angle": 0, + "content": "A Qualitative comparison Figure 12 presents the clustering results for DBDPC, DPC, DBSCAN, and K-Means on a" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.092, + 0.091, + 0.482, + 0.12 + ], + "angle": 0, + "content": "Algorithm 4 Recursive Center Identification for DBDPC with Iterative Center Identification" + }, + { + "type": "code", + "bbox": [ + 0.092, + 0.124, + 0.484, + 0.171 + ], + "angle": 0, + "content": "Input: Cutoff distance \\(d_{c}\\in \\mathbb{R}^{+}\\) , set of vectors \\(\\mathbf{U} = \\{\\mathbf{u}_i\\in\\) \\(\\mathbb{R}^{d_l}\\}_{i = 1}^n\\) , density values \\(\\{\\rho_i\\}_{i = 1}^n\\) , distance matrix \\(D =\\) \\([d_{ij}]\\) , fallback threshold \\(T > 0\\)" + }, + { + "type": "code", + "bbox": [ + 0.092, + 0.171, + 0.353, + 0.2 + ], + "angle": 0, + "content": "Output: Cluster center indices \\( C_{\\text{centers}} \\) Initialize cluster center set \\( C_{\\text{centers}} = \\)" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.201, + 0.306, + 0.216 + ], + "angle": 0, + "content": "Set the density of each point :" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.227, + 0.386, + 0.247 + ], + "angle": 0, + "content": "\\[\n\\rho_ {i} = \\mathrm {a r g s o r t} \\big (\\{- \\rho_ {j} \\} _ {j = 1} ^ {n} \\big) [ i ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.255, + 0.221, + 0.269 + ], + "angle": 0, + "content": "while \\(\\mathbf{U}\\neq \\emptyset\\) do" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.27, + 0.366, + 0.286 + ], + "angle": 0, + "content": "Compute \\(\\delta_{i}\\) for all vectors \\(\\mathbf{u}_i\\in \\mathbf{U}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.298, + 0.343, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\delta_ {i} = \\min _ {\\rho_ {j} > 
\\rho_ {i}} d _ {i j}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.332, + 0.302, + 0.346 + ], + "angle": 0, + "content": "Select cluster candidates:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.359, + 0.39, + 0.376 + ], + "angle": 0, + "content": "\\[\n\\mathbf {C} _ {\\text {n e w}} = \\left\\{\\mathbf {u} _ {i} \\in \\mathbf {U} \\mid \\delta_ {i} > d _ {c} \\right\\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.387, + 0.303, + 0.402 + ], + "angle": 0, + "content": "\\(C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\mathbf{C}_{\\mathrm{new}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.403, + 0.309, + 0.418 + ], + "angle": 0, + "content": "Update remaining vectors:" + }, + { + "type": "equation", + "bbox": [ + 0.107, + 0.44, + 0.48, + 0.476 + ], + "angle": 0, + "content": "\\[\n\\mathbf {U} \\leftarrow \\mathbf {U} \\backslash \\left(\\mathbf {C} _ {\\text {n e w}} \\cup \\left\\{\\mathbf {u} _ {k} \\in \\mathbf {U} \\mid \\begin{array}{c} \\exists \\mathbf {u} _ {i} \\in \\mathbf {C} _ {\\text {n e w}} \\\\ \\text {s u c h t h a t} d _ {i k} \\leq d _ {c} \\end{array} \\right\\}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.484, + 0.262, + 0.5 + ], + "angle": 0, + "content": "if \\(|\\mathbf{C}_{\\mathrm{new}}| < T\\) then" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.501, + 0.456, + 0.515 + ], + "angle": 0, + "content": "Order remaining vectors \\(\\mathbf{U}\\) by decreasing \\(\\rho_{i}\\):" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.516, + 0.457, + 0.531 + ], + "angle": 0, + "content": "\\(\\mathbf{U}\\gets \\mathrm{Sort}(\\mathbf{U},\\mathrm{key} = \\rho_{i},\\mathrm{order} = \\mathrm{descending})\\)" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.531, + 0.389, + 0.544 + ], + "angle": 0, + "content": "Call Iterative Center Identification:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.545, + 0.514, + 0.561 + ], + "angle": 0, + "content": 
"\\(C_{\\mathrm{centers}} \\gets\\) IterativeCenterIdentification(\\(C_{\\mathrm{centers}}\\), \\(\\mathbf{U}\\), \\(d_c\\))" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.562, + 0.254, + 0.575 + ], + "angle": 0, + "content": "return \\(C_{\\mathrm{centers}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.577, + 0.177, + 0.589 + ], + "angle": 0, + "content": "end if" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.591, + 0.179, + 0.604 + ], + "angle": 0, + "content": "end while" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.606, + 0.206, + 0.621 + ], + "angle": 0, + "content": "return \\(C_{\\mathrm{centers}}\\)" + }, + { + "type": "title", + "bbox": [ + 0.108, + 0.634, + 0.394, + 0.647 + ], + "angle": 0, + "content": "Function: Iterative Center Identification" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.649, + 0.483, + 0.679 + ], + "angle": 0, + "content": "Inputs: Remaining vectors \\(\\mathbf{U}\\) (ordered by \\(\\rho_{i}\\)), current cluster center set \\(C_{\\mathrm{centers}}\\), cutoff distance \\(d_{c}\\)" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.68, + 0.424, + 0.695 + ], + "angle": 0, + "content": "Outputs: Updated cluster center indices \\( C_{\\text{centers}} \\)" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.695, + 0.23, + 0.708 + ], + "angle": 0, + "content": "for all \\(\\mathbf{u}_i\\in \\mathbf{U}\\) do" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.709, + 0.33, + 0.725 + ], + "angle": 0, + "content": "if \\(\\min_{\\mathbf{u}_s\\in C_{\\mathrm{centers}}}d_{is} > d_c\\) then" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.725, + 0.325, + 0.74 + ], + "angle": 0, + "content": "\\(C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\{\\mathbf{u}_i\\}\\)" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.74, + 0.178, + 0.752 + ], + "angle": 0, + "content": "end if" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.755, + 0.162, + 0.767 + ], + "angle": 0, + "content": "end for" + }, + 
{ + "type": "text", + "bbox": [ + 0.109, + 0.77, + 0.205, + 0.784 + ], + "angle": 0, + "content": "return \\(C_{\\mathrm{centers}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.9 + ], + "angle": 0, + "content": "predefined set of two-dimensional points. The figure shows that only DBDPC and DBSCAN identify isolated points as distinct clusters, a crucial feature for visual token reduction, as these points contain unique and thus potentially valuable information. We note that, for DBSCAN, these isolated" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.091, + 0.716, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.197, + 0.716, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.091, + 0.913, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.197, + 0.913, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.306, + 0.907, + 0.363 + ], + "angle": 0, + "content": "Figure 12. An illustrative example of the difference in clustering characteristics between DBDPC and other clustering algorithms. Two-dimensional points and the Euclidean distance were used for illustration purposes." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.386, + 0.907, + 0.628 + ], + "angle": 0, + "content": "points may be identified as noise, depending on the chosen hyperparameters. Moreover, DBDPC partitions both the left and right groups of points into the same number of clusters, maintaining consistency despite the higher density on the left side. In contrast, DPC tends to form a greater number of clusters in high-density regions while creating large clusters in low-density areas, whereas DBSCAN follows the opposite pattern, producing large clusters in high-density regions. 
In the context of visual token reduction, merging points within these large clusters can result in information loss, leading to performance degradation and making DPC and DBSCAN less suitable than DBDPC for this task. We note that the results presented in Fig. 12 for DPC and DBSCAN may change when modifying the hyperparameters; however, the characteristics discussed above persist across different hyperparameter choices." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.642, + 0.88, + 0.658 + ], + "angle": 0, + "content": "D. Efficient center identification in DBDPC" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.667, + 0.719, + 0.684 + ], + "angle": 0, + "content": "D.1. A recursive approach" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.689, + 0.907, + 0.901 + ], + "angle": 0, + "content": "To enhance the efficiency of the DBDPC algorithm, we introduce a recursive center identification method that reduces computational overhead while maintaining clustering accuracy. In the DBDPC algorithm, vectors are processed in descending order of their local densities \\(\\rho_{i}\\), and a vector \\(\\mathbf{u}_i\\) is selected as a cluster center if it is farther than the cutoff distance \\(d_c\\) from all previously selected centers. Implementing this as described in the algorithm requires sequentially iterating through all the vectors and checking distances to all previously selected centers, which does not fully leverage GPU parallelization capabilities. In the DBDPC algorithm, when two points have the same density, one is treated as if it has a higher density than the other, depending on the order of their processing. 
To replicate this behavior, we assign the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.325, + 0.106 + ], + "angle": 0, + "content": "density of each point to its rank as:" + }, + { + "type": "equation", + "bbox": [ + 0.168, + 0.118, + 0.407, + 0.136 + ], + "angle": 0, + "content": "\\[\n\\rho_ {i} = \\operatorname {r a n k} _ {i} = \\operatorname {a r g s o r t} \\left(\\left\\{- \\rho_ {j} \\right\\} _ {j = 1} ^ {n}\\right) [ i ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.147, + 0.483, + 0.193 + ], + "angle": 0, + "content": "Our accelerated method leverages the quantity \\(\\delta_{i}\\), representing the minimum distance from vector \\(\\mathbf{u}_i\\) to any higher-density vector:" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.206, + 0.483, + 0.23 + ], + "angle": 0, + "content": "\\[\n\\delta_ {i} = \\min _ {\\rho_ {j} > \\rho_ {i}} d _ {i j} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.236, + 0.485, + 0.719 + ], + "angle": 0, + "content": "If \\(\\delta_{i} > d_{c}\\), then \\(\\mathbf{u}_{i}\\) is selected as a cluster center because it is not within \\(d_{c}\\) of any higher-density vector, which are the only potential cluster centers that can be selected before \\(d_{ij}\\) in the DBDPC algorithm. In addition, any vector within \\(d_{c}\\) of a cluster center identified using \\(\\delta_{i}\\) has a lower density than that center, as cluster centers identified using \\(\\delta_{i}\\) are not within \\(d_{c}\\) of any higher-density vector. In the DBDPC algorithm, such a vector would not be chosen as a cluster center because it violates the distance condition relative to already selected centers. By identifying these vectors early, we can exclude them from further consideration as potential centers. 
We repeat this process recursively: after selecting cluster centers where \\(\\delta_{i} > d_{c}\\) and excluding vectors within \\(d_{c}\\) of these centers, we process the remaining vectors. This recursion continues until the number of newly discovered cluster centers becomes small (e.g., less than 10). At that point, we fall back to the DBDPC method, processing the remaining vectors iteratively to ensure all potential centers are considered. This recursive approach reduces the number of iterations in the main loop and enhances parallelization, particularly on GPUs, by minimizing sequential computation. By leveraging \\(\\delta_{i}\\) and incorporating an early exclusion mechanism, the recursive center identification method reduces computational time while ensuring the same clustering results as the DBDPC algorithm. The recursive approach decreases the number of iterations and enhances GPU parallelization by minimizing sequential computation, making the algorithm more efficient for large datasets. The recursive center identification method is presented in Algorithm 4. We note that in practice this recursive approach reduces the computational time of the DBDPC algorithm by around 3 times." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.728, + 0.483, + 0.745 + ], + "angle": 0, + "content": "D.2. Proof of correctness of the recursive approach" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.75, + 0.483, + 0.87 + ], + "angle": 0, + "content": "To validate the correctness of the accelerated method, we demonstrate the following key points: selected centers are valid cluster centers, excluded vectors are not cluster centers and identifying remaining cluster centers is equivalent to identifying cluster centers on the reduced set. Proving these points suffices to establish correctness, as the remaining vectors after the recursive steps are treated the same as in the DBDPC algorithm." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Selected Centers Are Valid Cluster Centers In the DB-DPC algorithm, for any vector \\(\\mathbf{u}_i\\), only vectors with higher" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.197 + ], + "angle": 0, + "content": "densities are considered for selection as cluster centers before \\(\\mathbf{u}_i\\). If \\(\\mathbf{u}_i\\) is not within \\(d_c\\) of any higher-density vector (i.e., \\(\\delta_i > d_c\\)) then the distance of \\(\\mathbf{u}_i\\) from any previously selected center cannot exceed the cutoff distance \\(d_c\\). Consequently, \\(\\mathbf{u}_i\\) satisfies the condition for being a cluster center in the DBDPC algorithm, as it is farther than \\(d_c\\) from all centers processed earlier." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.198, + 0.905, + 0.318 + ], + "angle": 0, + "content": "Excluded Vectors Are Not Cluster Centers Vectors within \\( d_{c} \\) of a cluster center identified using \\( \\delta_{i} \\) have lower densities than that center, as these centers are not within \\( d_{c} \\) to any higher density point. In the DBDPC algorithm, such vectors would not be selected as cluster centers because they are within \\( d_{c} \\) to an already selected center, violating the distance condition. Therefore, excluding these vectors early does not affect the selection of valid cluster centers." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.318, + 0.905, + 0.591 + ], + "angle": 0, + "content": "Identifying Remaining Cluster Centers is Equivalent to Identifying Cluster Centers on the Reduced Set After selecting cluster centers where \\(\\delta_{i} > d_{c}\\) and excluding vectors within \\(d_{c}\\) of these centers, we focus on the reduced set of remaining vectors for further processing. 
The critical observation is that the previously selected cluster centers are not within \\(d_{c}\\) of any vector in the reduced set. This is ensured by the exclusion step, where all vectors within \\(d_{c}\\) of these centers have been removed. Consequently, when identifying new cluster centers within the reduced set, we do not need to consider distances to the previously selected centers, as they cannot influence the selection due to their distance. Moreover, the vectors that have been excluded are not potential cluster centers themselves. Meaning that they can not influence the center selection process. This means that any vector satisfying \\(\\delta > d_{c}\\) in the reduced set, is actually not within \\(d_{c}\\) to any higher density potential cluster center form the initial set, making it a cluster center." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.601, + 0.905, + 0.636 + ], + "angle": 0, + "content": "E. On the choice of Positional IDs for clustering algorithms" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.645, + 0.907, + 0.903 + ], + "angle": 0, + "content": "In our work, we benchmark four clustering algorithms: agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14]. For each algorithm, we use the key vectors for clustering, apply a cosine similarity-based distance (as in DBDPC), and evaluate two strategies: merging the hidden states within each cluster or selecting the cluster center as a representative token. We report the best-performing approach for each algorithm. Similar to DBDPC, we assign the position ID of the cluster center to the resulting vectors. However, apart from DPC, the other clustering algorithms do not explicitly provide a cluster center. For k-means and agglomerative clustering, we select the cluster center as the point closest to the average of all points in the cluster, using keys and cosine similarity. 
For DBSCAN, we experimented with choosing the point connected to the most other points within the cluster and found this approach to yield slightly better results, aligning" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.092, + 0.482, + 0.123 + ], + "angle": 0, + "content": "better with the principles of DBSCAN. Thus, we adopted this strategy in our tests." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.133, + 0.483, + 0.168 + ], + "angle": 0, + "content": "F. More about applying ToME to Visual Language Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.178, + 0.483, + 0.54 + ], + "angle": 0, + "content": "ToMe reduces the number of visual tokens at each layer of the transformer. For a given layer \\( i \\), the process starts by splitting the tokens into two distinct sets, A and B. Each token in set A is matched with its most similar counterpart in set B, using cosine similarity based on key vectors to determine the closest pairs. The top \\( r_i \\) pairs with the highest similarity are then selected for merging. Connected components from the matched pairs are combined into single vectors, where hidden states are averaged. It is important to note that each connected component contains exactly one element from set B, and when applying ToME to Visual Language Models, this element's position ID is assigned to the merged token. In [6], the number of visual tokens was reduced by a fixed quantity \\( (r_i = r) \\). However, this fixed reduction scheme cannot achieve more than a \\( 50\\% \\) reduction unless no reduction is done at later layers when the number of tokens drops below \\( r \\), which goes against the gradual reduction strategy proposed in ToMe. To enable higher reduction ratios, we adopt a linearly decreasing scheduler, where the reduction is higher in early layers and decreases in later layers. 
This approach achieves a smaller average number of visual tokens across the network while still reducing the token count at each layer, allowing us to reach high reduction ratios effectively." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.555, + 0.483, + 0.59 + ], + "angle": 0, + "content": "G. Implementation details and hyperparameters for PACT" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.599, + 0.483, + 0.901 + ], + "angle": 0, + "content": "For all experiments on LLaVA-OneVision-7B, we set \\( d_{n} = 2 \\), \\( \\alpha = 1.5 \\), and \\( L = 4 \\). While the optimal values of each parameter may vary depending on the dataset, we aim to evaluate the real-world effectiveness of our approach by using consistent values across all testing datasets. The results in Tab. 2 were obtained using \\( d_{c} = 0.21 \\) and \\( \\lambda = 0.55 \\) while those in Tab. 1 were obtained using \\( d_{c} = 0.17 \\) and \\( \\alpha = 0.7 \\). Additionally, to demonstrate the performance of our approach at different reduction ratios, we vary \\( d_{c} \\) and \\( \\lambda \\) and report the results. The values of the fixed parameters \\( d_{n} \\) and \\( \\alpha \\) were chosen by performing a grid search on SeedBench [24], which is why we do not include Seed-Bench in the testing datasets. It is important to note that finding the optimal parameters for all testing datasets is not the focus of this study, as this would require extensive testing of different values for \\( d_{c} \\), \\( \\lambda \\), \\( L \\), \\( \\alpha \\), and \\( d_{n} \\) on all test sets. Such an approach would not accurately reflect the real-world performance of our method. Instead, we chose to only vary \\( d_{c} \\) and \\( \\lambda \\) to evaluate the effectiveness of our approach at different reduction ratios. 
When" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.485 + ], + "angle": 0, + "content": "testing on SeedBench, we found that a pruning ratio higher than \\(60\\%\\) harms performance. Therefore, we vary the pruning ratio between \\(10\\%\\) and \\(60\\%\\) and test across different values of \\(d_{c}\\). When testing PACT on LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. We use the same values of \\(d_{n}\\) and \\(\\alpha\\) as when testing on LLaVA-OneVision-7B. We note that these hyperparameters may not be optimal; however, as we aim to test the generalizability of our approach, we opt to use the same hyperparameters across models. Figure 13, Figure 14 and Figure 15 show the maximum distance between the keys at several layers of the language model for LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. Following the same approach for LLaVA-OneVision-7B, we choose \\(L = 4\\) for Qwen2-VL-7B-Instruct and \\(L = 7\\) for InternVL2-8B. We note that the choice of the reduction layer for InternVL2-8B is not as evident as for LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct, as the increase in maximum distance from one layer to the next is sometimes minimal, making it unclear which layer offers the best balance between accuracy and computational efficiency. However, since we do not aim to experimentally determine the optimal reduction layer, we end up choosing \\(L = 7\\), as the maximum distance between keys is increased by an acceptable amount between the seventh and eighth layer. Following the same approach we use \\(L = 7\\) for LLaVA-1.6-Mistral-7B." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.498, + 0.896, + 0.515 + ], + "angle": 0, + "content": "H. 
More about test datasets and used metrics" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.524, + 0.907, + 0.794 + ], + "angle": 0, + "content": "For evaluating the different approaches, we use LMMs-Eval [26] and aim to follow the same dataset splits and metrics as used in [27]. We detail the used splits and metrics in Tab. 4. Some datasets require evaluation using a GPT model through the OPENAI API or other closed-source models. However, for many datasets the version of the closed-source model used in evaluating LLaVA-OneVision in [27] is no longer available. So we use the latest version of GPT-4 for our assessments at the time of publication (gpt-4o-2024-08-06). We also observed that when calling a closed-source model like GPT-4 via an API, the responses are not fully deterministic, even with a temperature set to zero, introducing some noise into the evaluation metrics. To reduce this noise, we exclude all these datasets when testing across different reduction ratios. On the other hand, for Tab. 1, we exclude MMVet, Vibe-Eval, VideoChatGPT, MM-LiveBench, and LLaVA-Wilder as they have high inference times, which would dominate the throughput calculation." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.903 + ], + "angle": 0, + "content": "For certain datasets, such as DocVQA, InfoVQA, and TextVQA, we use the validation split contrary to [27]. This choice allows us to test various reduction ratios and approaches without requiring submission to the test server, which would be impractical for extensive testing. 
For datasets requiring a test set submission (EgoSchema and PerceptionTest), where either the validation set is typically" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.506, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.109, + 0.09, + 0.468, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.285, + 0.482, + 0.327 + ], + "angle": 0, + "content": "Figure 13. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-1.6-Mistral-7B before the application of rotary embeddings." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.349, + 0.482, + 0.544 + ], + "angle": 0, + "content": "not used for evaluation or does not exist, we report the submission-based metrics evaluated directly on the test set. As explained above, for some datasets our evaluation setup differs from the one used for evaluating LLaVA-OneVision in [27], which may result in variations in the reported results for this model on certain datasets. This is primarily due to the use of validation splits for DocVQA, InfoVQA, and TextVQA, as well as the reliance on GPT-based metrics for some datasets (a common practice for these benchmarks, making alternative evaluation difficult). Nevertheless, our comparisons remain fair, as the same evaluation procedure is consistently applied across all approaches and reduction ratios." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.546, + 0.481, + 0.62 + ], + "angle": 0, + "content": "We note that when using reduction methods, results may include slight variations due to edge cases where distances or importance metrics for different vectors are equal. That's why we report results based on the average of three different runs for each dataset." 
+ }, + { + "type": "text", + "bbox": [ + 0.094, + 0.621, + 0.482, + 0.785 + ], + "angle": 0, + "content": "Notably, when testing on Qwen2-VL-7B-Instruct without reduction, some datasets encountered GPU out-of-memory errors (MLVU, VideoMME, and ActivityNet Perception) which we excluded from the test set. Additionally, results on ScienceQA were quite low when tested without reduction (0.132), leading to its exclusion from testing as well. We note that, as we use LMM-Eval [26] for evaluation, results differ for some datasets from the officially reported results, as prompts are sometimes not formatted in the same manner. This observation also applies to InternVL2-8B." + }, + { + "type": "title", + "bbox": [ + 0.094, + 0.801, + 0.35, + 0.816 + ], + "angle": 0, + "content": "I. Additional numerical results" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.826, + 0.481, + 0.9 + ], + "angle": 0, + "content": "Table 8 and Tab. 9 show a comparison of DBDPC and various clustering algorithms for a reduction ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B across multiple datasets. The results demonstrate that DBDPC outperforms other clustering algorithms in visual token reduction for the" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.092, + 0.887, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.311, + 0.905, + 0.352 + ], + "angle": 0, + "content": "Figure 14. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of Qwen2-VL-7B-Instruct before the application of rotary embeddings." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.401, + 0.905, + 0.596 + ], + "angle": 0, + "content": "majority of the datasets. Additionally, the tables show that the clustering process for DBDPC is significantly faster than that of other clustering algorithms. 
Table 10 presents a comparison of EUTI-based visual token pruning and FastV for a reduction ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B across various datasets. The results indicate that EUTI outperforms FastV on most datasets while also being more computationally efficient. Table 15 shows that using keys for distance calculations in DBDPC outperforms hidden states across the majority of the test datasets. Also, we present a comparison between PACT and other visual reduction techniques for InternVL2-8B, and LLaVA-1.6-Mistral-7B across different datasets in Tab. 6, and Tab. 7." + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.647, + 0.887, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.859, + 0.905, + 0.9 + ], + "angle": 0, + "content": "Figure 15. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of InternVL2-8B before the application of rotary embeddings." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.095, + 0.09, + 0.481, + 0.122 + ], + "angle": 0, + "content": "J. Ablation study : Additional numerical results" + }, + { + "type": "text", + "bbox": [ + 0.095, + 0.133, + 0.482, + 0.298 + ], + "angle": 0, + "content": "Table 11 shows a comparison between PACT, DBDPC, and EUTI for a reduction ratio of approximately \\(70\\%\\), applied on LLaVA-OneVision-7B. The results demonstrate that PACT, which combines both clustering and pruning, outperforms the other two methods that are either clustering-based or pruning-based across various datasets. More importantly, DBDPC and EUTI exhibit a significant drop in performance on some of the datasets, which is not the case for PACT. We note that numerical results for the ablation studies conducted on DBDPC, EUTI, and PACT can be found in Tab. 12, Tab. 13 and Tab. 14." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.927, + 0.504, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.249, + 0.907, + 0.333 + ], + "angle": 0, + "content": "Table 4. Dataset Splits, Subsets, and Evaluation Metrics Used in Our Experiments. Default indicates the use of the standard test split or cases where only one split/subset is available. The evaluation metrics employed are those commonly used for the respective datasets and generally the ones proposed in the official papers. For GPT-based scores (or any model-based scores), this means that a GPT model was used during evaluation, typically to extract answers from the generated output text, which are then matched with the ground truth to calculate accuracy using exact matches. When accuracy is reported, it generally implies that only an exact match is considered a correct answer." + }, + { + "type": "table", + "bbox": [ + 0.227, + 0.344, + 0.768, + 0.74 + ], + "angle": 0, + "content": "
DatasetSplitSubsetEvaluation Metric
VideoMMEDefaultNo subtitlesAccuracy
MMEDefaultDefaultMME Perception Score
DocVQAValidationDefaultANLS
MLVUDefaultDefaultAccuracy
LLaVA-InterleaveDefaultOut-domainAccuracy
ChartQAValidationDefaultRelaxed Accuracy
MMBenchValidationEnglishGPT-based Score
MuirBenchDefaultDefaultAccuracy
ScienceQADefaultVision onlyAccuracy
MMMUValidationDefaultAccuracy
AI2DDefaultDefaultAccuracy
InfographicVQAValidationDefaultANLS
MMStarDefaultDefaultAccuracy
ActivityNetQADefaultDefaultGPT-based Score
MM-LiveBenchDefault2406GPT-based Score
LLaVA-WilderDefaultSmallGPT-based Score
MathVerseDefaultVision miniGPT-based Score
MathVistaDefaultTestminiGPT-based Score
MMVetDefaultDefaultGPT-based Score
Vibe-EvalDefaultDefaultREKA-based Score
VideoChatGPTDefaultDefaultGPT-based Score
EgoSchemaDefaultDefaultSubmission
PerceptionTestDefaultMultiple Choice QASubmission
TextVQAValidationDefaultOfficial metric
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.506, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.228, + 0.095, + 0.77, + 0.109 + ], + "angle": 0, + "content": "Table 5. Performance of PACT on LLaVA-OneVision-7B using \\(d_{c} = 0.17\\) and \\(\\alpha = 0.7\\)." + }, + { + "type": "table", + "bbox": [ + 0.258, + 0.121, + 0.744, + 0.464 + ], + "angle": 0, + "content": "
DatasetPACT (Ours)
MetricRed. RatioProc. TimeAlgo. Time
VideoMME57.769.2%0.3210.021
MME1571.072.1%0.2260.017
DocVQA85.471.1%0.4670.026
MLVU64.869.2%0.3220.022
LLaVA-Interleave62.272.2%0.1330.010
ChartQA77.371.4%0.3090.019
MMBench79.972.0%0.1340.010
MuirBench42.470.9%0.1750.013
ScienceQA93.572.0%0.1300.010
MMMU48.872.6%0.1030.007
AI2D81.272.5%0.1730.013
InfographicVQA61.570.0%0.4030.023
MMStar59.572.3%0.1470.011
ActivityNetQA55.170.0%0.4090.029
MathVerse17.176.0%0.3500.021
MathVista62.173.0%0.2600.015
EgoSchema60.069.1%0.3200.021
PerceptionTest52.370.0%0.3010.023
TextVQA75.569.2%0.3200.023
" + }, + { + "type": "table_caption", + "bbox": [ + 0.165, + 0.48, + 0.833, + 0.494 + ], + "angle": 0, + "content": "Table 6. Comparison of PACT with FastV, VTW, and ToME applied on InternVL2-8B on Various Datasets." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.505, + 0.907, + 0.71 + ], + "angle": 0, + "content": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
VideoMME52.20.24751.168.4%0.15151.10.15551.00.14250.20.190
MME1621.00.1711591.969.9%0.1211588.70.1181627.00.1111533.30.155
MLVU50.60.43949.768.8%0.32648.80.32549.50.33329.30.343
LLaVA-Interleave40.00.39039.071.2%0.26539.70.26339.60.23036.70.316
MMBench81.90.16180.470.4%0.11880.20.11680.20.10970.80.165
MuirBench35.70.43234.470.3%0.24935.60.25833.70.21032.70.296
ScienceQA97.10.16597.170.8%0.11895.80.11695.70.10989.90.151
MMMU48.50.16748.070.6%0.12647.70.12647.80.11947.50.156
AI2D82.50.14681.470.7%0.11278.50.11079.60.10574.40.142
MMStar59.00.17956.770.4%0.18654.20.18453.40.35255.10.156
PerceptionTest57.70.30056.866.0%0.20356.20.21334.10.19255.20.228
EgoSchema54.00.24053.767.0%0.15553.10.16332.20.14652.90.172
ActivityNet51.70.24051.366.0%0.15351.00.16130.80.14350.40.171
MM-LiveBench68.03.07567.368.0%2.14067.02.24740.42.00366.62.354
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.723, + 0.885, + 0.737 + ], + "angle": 0, + "content": "Table 7. Comparison of PACT with FastV, Prumerge, and Hired applied on LLaVA-1.6-Mistral-7B across multiple datasets." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.748, + 0.907, + 0.895 + ], + "angle": 0, + "content": "
DatasetNo ReductionPACT (Ours)FastVPrumergeHired
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1500.00.2371507.170.3%0.1591503.90.1581485.40.1661497.00.168
DocVQA70.00.36367.167.1%0.28464.50.28148.80.29365.80.295
ChartQA52.90.33249.370.1%0.25948.90.26136.00.26446.10.266
MMBench68.20.22668.071.9%0.15567.90.15466.20.16067.60.164
ScienceQA73.00.19772.771.5%0.14473.20.14571.70.14872.90.149
MMMU34.20.23934.971.5%0.17134.70.16933.90.18033.90.180
AI2D67.50.23367.570.9%0.16067.00.15864.50.16565.90.166
InfographicVQA36.90.29435.666.2%0.22633.40.22931.90.23631.60.236
MMStar36.20.37536.771.9%0.35036.60.40035.10.34535.90.345
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.103, + 0.907, + 0.131 + ], + "angle": 0, + "content": "Table 8. Comparison of DBDPC and Agglomerative Clustering Methods for a Reduction Ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.142, + 0.907, + 0.316 + ], + "angle": 0, + "content": "
DatasetDBDPC (ours)Agg. (Single Linkage)Agg. (Average Linkage)Agg. (Complete Linkage)
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.61.5041.14857.01.6571.31657.91.6901.350
MME1563.80.2550.0281554.10.9940.7381559.21.1230.8681563.01.1510.897
DocVQA84.70.5300.04483.61.8991.37984.42.1851.66284.32.3081.777
MLVU64.20.3840.03964.01.5741.22965.21.6751.32964.81.7001.355
LLaVA-Interleave62.10.1510.01662.00.4250.27761.50.4460.29861.40.4460.298
ChartQA76.00.3660.03174.51.1510.79875.81.2530.91075.81.2770.930
MMBench80.10.1510.01679.50.4270.27779.70.4370.29179.80.4490.299
MuirBench43.20.2150.02341.40.6670.47442.00.7270.53442.00.7380.544
ScienceQA94.70.1470.01594.80.3940.25094.70.4160.27194.70.4130.269
MMMU48.30.1100.00948.40.2180.11049.30.2320.12148.20.2250.117
AI2D80.70.2020.02280.80.6670.47280.60.7480.55180.10.7530.557
InfographicVQA61.60.5280.04657.11.6081.18159.81.8181.39459.81.8701.436
MMStar60.50.1670.01860.20.5070.34459.80.5560.39060.50.5600.395
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.35, + 0.905, + 0.379 + ], + "angle": 0, + "content": "Table 9. Comparison of DBDPC, DBSCAN, DPC, and KMeans Clustering Methods for a Reduction Ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.39, + 0.907, + 0.562 + ], + "angle": 0, + "content": "
DatasetDBDPC (ours)DBSCANDPCKMeans
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.40.3940.04656.90.7290.39257.31.7251.383
MME1563.80.2550.0281560.30.2740.0361549.90.6370.3801549.91.2540.999
DocVQA84.70.5300.04484.20.5330.04483.00.9500.44279.62.0591.544
MLVU64.20.3840.03964.20.3910.04864.20.7270.38264.61.7251.377
LLaVA-Interleave62.10.1510.01660.40.1590.02663.90.2580.12162.30.7110.566
ChartQA76.00.3660.03175.20.3690.03475.20.7580.41574.21.3991.059
MMBench80.10.1510.01678.10.1530.02079.50.3260.17979.90.7020.552
MuirBench43.20.2150.02342.40.2190.02842.00.4660.27342.90.9550.763
ScienceQA94.70.1470.01591.20.1500.02494.30.2510.11793.40.6610.518
MMMU48.30.1100.00947.80.1300.03048.30.1870.07848.20.5000.391
AI2D80.70.2020.02279.20.2020.02280.30.4550.26481.11.0620.860
InfographicVQA61.60.5280.04654.00.5310.05256.60.9750.54757.81.7801.357
MMStar60.50.1670.01856.60.1790.02860.60.3760.21360.20.8280.661
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.597, + 0.907, + 0.625 + ], + "angle": 0, + "content": "Table 10. Comparison of EUTI-based visual tokens pruning and FastV for a Reduction Ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B." + }, + { + "type": "table", + "bbox": [ + 0.171, + 0.637, + 0.825, + 0.886 + ], + "angle": 0, + "content": "
DatasetEUTI (Ours)FastV
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME58.40.3510.00557.60.3810.040
MME1560.00.2560.0041570.70.2830.025
DocVQA86.50.5210.00585.30.5590.032
MLVU64.30.3550.00463.10.3910.040
LLaVA-Interleave58.90.1400.00359.70.1520.007
ChartQA78.60.3440.00478.00.3630.016
MMBench80.20.1420.00379.20.1510.005
MuirBench40.00.1910.00340.80.2040.009
ScienceQA93.60.1370.00392.30.1490.006
MMMU48.80.1010.00247.30.1100.003
AI2D81.10.1910.00380.30.2020.009
InfographicVQA63.00.4250.00560.30.4730.040
MMStar59.60.1590.00359.60.1700.007
" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.506, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.15, + 0.907, + 0.18 + ], + "angle": 0, + "content": "Table 11. Comparison of PACT with Standalone Methods: EUTI-based Visual Token Pruning and DBDPC Clustering Algorithm for a Reduction Ratio of approximately \\(70\\%\\), applied on LLaVA-OneVision-7B." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.189, + 0.907, + 0.413 + ], + "angle": 0, + "content": "
DatasetPACTDBDPCEUTI
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.50.3210.02157.30.3420.04058.40.3050.005
MME1558.70.2260.0171543.70.2430.0281595.90.2130.004
DocVQA84.30.4670.02682.50.5000.04485.30.4560.005
MLVU64.60.3220.02263.90.3580.03964.40.2910.004
LLaVA-Interleave63.90.1330.01062.60.1490.01657.10.1270.003
ChartQA77.20.3110.01975.10.3330.03178.20.2920.004
MMBench80.20.1340.01079.70.1470.01679.60.1280.003
MuirBench42.80.1750.01343.20.2110.02339.90.1640.003
ScienceQA93.60.1300.01093.80.1420.01592.20.1230.003
MMMU48.90.1030.00747.20.1090.00948.90.0960.002
AI2D80.60.1730.01380.50.1910.02279.90.1640.003
InfographicVQA61.90.4030.02358.80.4650.04660.40.3600.005
MMStar59.50.1470.01159.50.1630.01859.20.1400.003
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.542, + 0.907, + 0.598 + ], + "angle": 0, + "content": "Table 12. Ablation Studies on DBDPC-based visual token reduction for a Reduction Ratio of approximately \\(60\\%\\) on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches. When ablating the Center Position-IDs assignment, we reorder the hidden states based on the mean of the Position-IDs of the elements in each cluster and then assign position IDs sequentially." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.609, + 0.891, + 0.838 + ], + "angle": 0, + "content": "
DBDPCw/o Center Position-IDs assignmentw/o Proportional Attentionw/o Merging
VideoMME57.458.057.957.5
MME1563.81539.31523.81476.9
DocVQA84.728.284.283.1
MLVU64.261.263.963.5
LLaVA-Interleave62.169.663.263.6
ChartQA76.024.876.074.4
MMBench80.176.180.179.6
MuirBench43.226.543.244.0
ScienceQA94.767.494.293.6
MMMU48.334.547.648.2
AI2D80.743.080.479.9
InfographicVQA61.617.859.858.7
MMStar60.558.959.659.1
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.088, + 0.908, + 0.119 + ], + "angle": 0, + "content": "Table 13. Ablation Studies on the EUTI-based Visual Token Pruning for a Reduction Ratio of approximately \\(70\\%\\), applied on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches." + }, + { + "type": "table", + "bbox": [ + 0.209, + 0.127, + 0.785, + 0.356 + ], + "angle": 0, + "content": "
DatasetEUTIEUTI w/o NormNorm (EUTI w/o Global Query)
VideoMME58.457.656.6
MME1595.91573.41576.5
DocVQA85.385.179.7
MLVU64.363.063.1
LLaVA-Interleave57.157.952.9
ChartQA78.276.476.7
MMBench79.679.479.4
MuirBench40.040.539.6
ScienceQA92.291.893.5
MMMU48.949.349.2
AI2D79.979.979.7
InfographicVQA60.460.149.3
MMStar59.257.459.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.36, + 0.908, + 0.391 + ], + "angle": 0, + "content": "Table 14. Ablation Study on Pruned Tokens Recovery for a Reduction Ratio of approximately \\(70\\%\\). We remove the token recovery step, which is equivalent to Setting \\(\\alpha\\) to Zero. We report only the metrics, as processing time is similar across both approaches." + }, + { + "type": "table", + "bbox": [ + 0.266, + 0.399, + 0.726, + 0.628 + ], + "angle": 0, + "content": "
DatasetPACTPACT w/o Pruned-Token Recovery
VideoMME57.657.4
MME1556.71576.3
DocVQA84.384.3
MLVU64.664.2
LLaVA-Interleave63.959.6
ChartQA76.476.4
MMBench79.979.8
MuirBench42.842.2
ScienceQA93.393.6
MMMU48.548.5
AI2D80.680.6
InfographicVQA61.961.3
MMStar75.174.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.632, + 0.908, + 0.663 + ], + "angle": 0, + "content": "Table 15. Ablation Study on Keys Utilization in DBDPC for a Reduction Ratio of approximately \\(60\\%\\). Metrics are reported, as processing time is similar across both configurations." + }, + { + "type": "table", + "bbox": [ + 0.321, + 0.672, + 0.671, + 0.901 + ], + "angle": 0, + "content": "
DatasetDBDPCDBDPC w/o Keys
VideoMME57.4057.22
MME1563.801526.18
DocVQA84.7080.50
MLVU64.2064.60
LLaVA-Interleave62.1060.80
ChartQA76.0068.80
MMBench80.1079.21
MuirBench43.2041.40
ScienceQA94.7091.90
MMMU48.3047.90
AI2D80.7079.10
InfographicVQA61.656.70
MMStar60.5058.40
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "11" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_origin.pdf b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d644db06cdbbe8d8ad57d4b6180703973b7fa68b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/d7a19180-6a88-4095-b8c3-57d4a8694136_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f32326e846aded257521c22ceab0874be0c8fdc6633f2dd133c4974bf47d96ae +size 1583048 diff --git a/data/2025/2504_08xxx/2504.08966/full.md b/data/2025/2504_08xxx/2504.08966/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d5e43c422bb300fe750ed6b291c280a016560799 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/full.md @@ -0,0 +1,712 @@ +# PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models + +Mohamed Dhouib + +LIX, École Polytechnique, IP Paris, France +mohamed.dhouib@polytechnique.edu + +Sonia Vanier + +LIX, École Polytechnique, IP Paris, France +sonia.vanier@polytechnique.edu + +Davide Buscaldi + +LIPN, Université Sorbonne Paris Nord, France davide.buscaldi@lipn.univ-paris13.fr + +Aymen Shabou + +DataLab Groupe, Crédit Agricole S.A, France + +aymen.shabou@credit-agricole-sa.fr + +# Abstract + +Visual Language Models require substantial computational resources for inference due to the additional input tokens needed to represent visual information. However, these visual tokens often contain redundant and unimportant information, resulting in an unnecessarily high number of tokens. To address this, we introduce PACT, a method that reduces inference time and memory usage by pruning irrelevant tokens and merging visually redundant ones at an early layer of the language model. 
Our approach uses a novel importance metric to identify unimportant tokens without relying on attention scores, making it compatible with FlashAttention. We also propose a novel clustering algorithm, called Distance Bounded Density Peak Clustering, which efficiently clusters visual tokens while constraining the distances between elements within a cluster by a predefined threshold. We demonstrate the effectiveness of PACT through extensive experiments. + +# 1. Introduction + +Extending Large language models to modalities other than text [11, 18, 19, 55, 56] has seen success in recent years across various domains, especially in the visual domain with models like LLaVA [31] and Qwen-VL [4]. State-of-the-art Visual Language Models generally consist of three main components: a vision encoder, a connector, and a language model. The vision encoder converts input images into visual tokens, which are passed through the connector and then fed to the language model along with the input text. While this architecture has shown impressive performance across different tasks, it suffers from high computational cost due to the large number of visual tokens. In this paper, we introduce two complementary methods to + +optimize Visual Language Models by reducing inference time and memory requirements: a pruning module and a clustering algorithm. These methods can be used independently or combined, forming the PACT approach for greater effectiveness. Notably, our pruning and clustering modules, as well as PACT, are applied at inference time and thus require no additional training. The pruning module identifies unimportant visual tokens based on a novel importance metric that evaluates each token's relevance without relying on attention scores. This makes it compatible with FlashAttention [12], as FlashAttention does not support the calculation of attention scores. 
The second module introduces a novel clustering algorithm, Distance Bounded Density Peak Clustering (DBDPC), which clusters visual tokens while ensuring that the distances between elements within a cluster are constrained by a predefined threshold. By combining these two methods, we develop PACT. First, the pruning module eliminates unimportant tokens, then the DBDPC algorithm clusters the remaining ones. Tokens that were initially pruned but are sufficiently close to the constructed clusters are reincorporated, ensuring that valuable information from the pruned tokens is recovered. Finally, the tokens within each cluster are merged into a single representative token, reducing the total token count. + +By combining both pruning and clustering, PACT achieves an effective visual token reduction, addressing both irrelevant and redundant tokens. When applied to LLaVA-OneVision-7B, PACT achieves a $50\%$ visual token reduction with negligible performance loss. Moreover, PACT exhibits significantly less performance degradation at higher reduction ratios compared to previous methods, achieving $71.3\%$ visual token reduction ratio with only $1.4\%$ performance drop, whereas previous state-of-the-art methods show at best a $4.4\%$ performance drop at an equal reduction ratio. Our contributions are as follows: + +- We propose a novel visual token pruning metric that does + +not rely on attention scores, ensuring compatibility with FlashAttention, and empirically validate its effectiveness. + +- We introduce a new clustering algorithm aimed at reducing visual redundancy and show its superiority over other clustering algorithms for visual token reduction. +- We show that combining pruning with clustering-based merging surpasses either technique alone for visual token reduction. By integrating our pruning and clustering algorithms, we propose a novel approach, PACT, and demonstrate that it outperforms previous and concurrent works [3, 6, 9, 30, 44]. 
The codebase used to obtain the results in this study is available at https://github.com/orailix/PACT/tree/main. + +# 2. Related work + +# 2.1. Visual language models + +Since the introduction of BLIP-2 [28], the use of a visual encoder followed by a connector that feeds visual vectors to the language model has become the standard architecture for Visual Language Models (VLMs) [7, 17, 50]. Recent models [10, 27, 49] have enhanced VLM architecture with high-resolution handling, which is necessary for document understanding tasks [13, 23]. LLaVA-OneVision [27] divides images into $384 \times 384$ crops, encodes each part with SigLIP [54], and uses bilinear interpolation to reduce token count up to 8,748 tokens. InternVL2 [10] splits images into $448 \times 448$ tiles, processing up to 40 tiles per image with InternViT [10], and applies pixel shuffle to reduce the number of visual tokens, producing up to 10,240 tokens. Qwen-VL2 [49] uses 2D Rotary Positional Embeddings for dynamic resolution support and merges adjacent tokens via an MLP layer, yet still requires over 10,000 tokens for high resolution images. While these models apply token reduction by merging adjacent tokens to preserve structure, they do not address token irrelevance or redundancy, limiting efficiency. + +# 2.2. Visual token reduction + +Reducing the number of visual tokens in Vision Transformers (ViT) has been a key focus of the research community for several years. EViT [29] identifies and merges irrelevant tokens by relying on the attention scores between the class token ([CLS]) and visual tokens. ToME [6] proposed a simple yet effective approach that iteratively merges similar tokens throughout the ViT layers. Building on these ideas, recent efforts have extended visual token reduction techniques to VLMs. LaVIT [21] used the Gumbel-Softmax [20] to train a mask that selects tokens for retention, merging discarded tokens into retained ones via additional attention layers. 
LLaVA-PruMerge [44] accelerates LLaVA 1.5 [31] by leveraging the attention scores between the [CLS] token and visual tokens in the last layer of the ViT encoder to decide which tokens to retain, while HiRED [3] refines
In this work, we introduce PACT, a novel approach that tackles both issues simultaneously by pruning irrelevant tokens and merging visually redundant ones. + +# 3. Method + +In this section, we present PACT, a method that aims to reduce VLMs inference time and memory usage by pruning unimportant tokens and merging visually redundant ones at an early layer $L$ of the language model. PACT consists of three steps: First, unimportant tokens are identified. Next, the remaining tokens are clustered. Finally, tokens in each cluster, along with sufficiently close tokens that were initially discarded, are merged. PACT operates within a selected layer $L$ of the language model and is applicable in scenarios where visual tokens are fed into the language model, regardless of the architecture of the visual encoder or connector. The three-step process of PACT is illustrated in Figure 1. We denote the hidden states at layer $L$ by $\mathbf{H} \in \mathbb{R}^{n \times d}$ , where $n$ is the number of visual tokens and $d$ is the dimensionality of the hidden states. We denote by $\mathbf{K}, \mathbf{Q} \in \mathbb{R}^{n \times n_h \times d_h}$ the key and query matrices for the visual tokens at layer $L$ , where $n_h$ represents the number of attention heads and $d_h$ is the dimensionality of each attention heads. + +![](images/c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg) +Figure 1. Simplified illustration of PACT. This figure illustrates the three-step process of PACT: (1) First, EUTI is used to prune visual tokens deemed unimportant; (2) Then, DBDPC is applied to cluster the remaining tokens, ensuring that the distance between each token and its corresponding cluster center is smaller than the cutoff distance; (3) Finally, initially pruned tokens that are close to cluster centers are reintegrated, and the elements within each cluster are merged to form the reduced set of visual tokens. 
+ +# Algorithm 1 EUTI + +Input: Hidden states $\mathbf{H} \in \mathbb{R}^{n \times d}$ ; key and query matrices + +$\mathbf{K}, \mathbf{Q} \in \mathbb{R}^{n \times n_h \times d_h}$ ; pruning percentage $\lambda \in [0,1]$ + +Output: Sets of important and unimportant visual tokens + +Step 1: Calculate the global query vector + +$$ +\mathbf {Q} _ {\text {g l o b a l}} = \frac {1}{n} \sum_ {i = 1} ^ {n} \mathbf {Q} _ {i} +$$ + +Step 2: Compute the importance score for each visual token + +for all $i = 1,\dots ,n$ do + +$$ +s _ {i} = \frac {1}{n _ {h}} \sum_ {j = 1} ^ {n _ {h}} \operatorname {S o f t m a x} \left(\mathbf {k} _ {i} ^ {(j)} \cdot \mathbf {Q} _ {\text {g l o b a l}} ^ {(j)}\right) \cdot \left\| \mathbf {h} _ {i} \right\| _ {2} +$$ + +end for + +Step 3: Define sets of important and unimportant tokens + +$$ +S _ {\text {i m p o r t a n t}} = \left\{i \mid s _ {i} \geq \text {P e r c e n t i l e} (s, \lambda) \right\} +$$ + +$$ +S _ {\text {u n i m p o r t a n t}} = \{i \mid s _ {i} < \text {P e r c e n t i l e} (s, \lambda) \} +$$ + +Return $S_{\text{important}}$ , $S_{\text{unimportant}}$ + +tion head. For simplicity, we omit the layer index in the notation. We denote the position index of a token by a subscript, while the attention head is indicated by a superscript. For instance, $\mathbf{k}_i^{(j)}$ represents the key vector corresponding to the $i$ -th visual token and the $j$ -th attention head. + +# 3.1. Unimportant tokens identification + +A straightforward approach to identifying unimportant tokens at a certain layer $L$ of the used language model is to define the importance of each token as the total attention score that a given token receives from all other tokens [9]. However, this method has three main drawbacks. First, current VLMs utilize FlashAttention [12], which does not support outputting attention scores. Secondly, attention scores are computed with masking, which introduces + +biases. 
Tokens at the end of a sequence tend to receive lower average attention scores since fewer tokens attend to them. Calculating the average attention score for each token based solely on the tokens that attend to it can mitigate this masking effect but introduces a new bias: end-of-sequence tokens may exhibit higher scores as they receive attention mainly from nearby tokens. This leads to either earlier or later tokens being pruned more frequently, as shown in Fig. 2. Such positional bias should be avoided, as pruning should depend solely on the information that visual tokens hold, not their position. Finally, relying only on keys and queries at a single layer to determine an importance metric may fail to fully capture the significance of visual tokens across all layers of the language model, mainly because each self-attention layer focuses on different aspects of the visual tokens. To address this, we propose an importance metric that incorporates the accumulated in + +![](images/080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg) +(a) Average attention scores as a function of Position IDs. + +![](images/29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg) +(b) Average attention scores relative to non-masked tokens as a function of Position IDs. +Figure 2. Illustration of the bias induced by the use of the average attention scores across visual tokens as a pruning metric. In (a), averaging attention over all tokens favors earlier tokens, leading to pruning later tokens more frequently. In (b), averaging only over attending tokens reverses the bias, leading to earlier tokens being pruned more often. + +![](images/d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg) +Figure 3. Illustration of visual token norm statistics at the fourth layer of LLaVA-OneVision-7B. + +formation from the hidden states and the layer-specific information from the keys and queries at an early layer $L$ . 
We refer to this method as Efficient Unimportant Tokens Identification (EUTI). We speculate that the norms of hidden states can provide critical information about the importance of each visual token, as they reflect how much information a particular token carries through the network. Figure 3 presents statistics on the hidden state norms of visual tokens at the fourth layer of LLaVA-OneVision-7B, indicating a high variance. This variance suggests that certain visual tokens accumulate more information through residual connections and may therefore be more important for subsequent calculations. To leverage information from both hidden state norms and the key and query vectors, we first compute a global query vector $\mathbf{Q}_{\mathrm{global}}$ as the average of all query vectors across visual tokens:
The final score is obtained by scaling the result with the hidden state norm: + +$$ +s _ {i} = \frac {1}{n _ {h}} \sum_ {j = 1} ^ {n _ {h}} \operatorname {S o f t m a x} \left(\mathbf {k} _ {i} ^ {(j)} \cdot \mathbf {Q} _ {\text {g l o b a l}} ^ {(j)}\right) \cdot \| \mathbf {h} _ {i} \| _ {2} \tag {2} +$$ + +Then, we divide the visual tokens into important and unimportant tokens, using a parameter $\lambda \in [0,1]$ to control the percentage of tokens deemed unimportant. The two sets are defined as follows: + +$$ +S _ {\text {i m p o r t a n t}} = \left\{i \mid s _ {i} \geq \text {P e r c e n t i l e} (s, \lambda) \right\} \tag {3} +$$ + +$$ +S _ {\text {u n i m p o r t a n t}} = \left\{i \mid s _ {i} < \text {P e r c e n t i l e} (s, \lambda) \right\} \tag {4} +$$ + +Unimportant tokens can be pruned, or the resulting sets can be combined with a clustering algorithm to further reduce the number of visual tokens, as we will show in the next section. The full EUTI algorithm is illustrated in Algorithm 1. We note that in the case where Rotary Embeddings are used [47], we use the keys and queries before their application to avoid any positional bias. + +# 3.2. Clustering-based merging of visual tokens + +Distance Bounded Density Peak Clustering Relying solely on the importance scores presented above to prune unimportant tokens can lead to a significant reduction in visual tokens, retaining only important ones. However, redundant information may still be present across retained visual tokens. Therefore, we propose merging the redundant visual tokens using a clustering algorithm. We desire our clustering algorithm to have the following characteristics: + +(a) Low computational time. +(b) Avoid assigning points that are far from each other, in terms of feature similarity, into the same cluster. + +Table 1. Throughput ratio, reduction ratio, and GPU memory usage for PACT, FastV, VTW, and ToME applied to LLaVA-OneVision-7B. 
Results are reported at a $98.6\%$ Approach-to-Reference Metric Ratio. + +
No reductionPACT (ours)FastVVTWToME
Reduction Ratio0%71.3%50%25%40%
LLM Throughput Ratio100%225%165%160%137%
GPU Maximum Memory Consumption (GB)27.419.0530.419.221.4
+ +Condition (b) ensures that outliers are not assigned to distant cluster centers, as we speculate that these outliers contain important information and should only be merged with nearby outliers or remain as single points in separate clusters. Condition (b) also guarantees that points in each cluster will be relatively close to each other, which minimizes + +information loss when assigning a single vector as their representative. The Density Peaks Clustering (DPC) algorithm [5] is appealing in this context because it satisfies condition (a), unlike iterative clustering algorithms like k-means [2]. However, DPC does not satisfy condition (b) as it can form large clusters where boundary points may be distant from each other. The same issue arises with other algorithms such as DBSCAN [14]. Therefore, we propose a new clustering algorithm, which we call Distance Bounded Density Peaks Clustering (DBDPC). + +DBDPC takes as input a set of vectors $\{\mathbf{u}_i\in \mathbb{R}^{d_1}\}_{i = 1}^q$ where $q,d_{1}\in \mathbb{N}^{+}$ , and outputs a set of clusters. Our algorithm's output depends on two parameters, the cutoff distance $d_c\in \mathbb{R}^+$ and a normalization factor $d_{n}\in \mathbb{R}^{+}$ , as well as a distance function $d:\mathbb{R}^{d_1}\times \mathbb{R}^{d_1}\to \mathbb{R}^+$ . We define the distance between two vectors $\mathbf{u}_i$ and $\mathbf{u}_j$ as: + +$$ +d _ {i j} = d \left(\mathbf {u} _ {i}, \mathbf {u} _ {j}\right) = 1 - \frac {\mathbf {u} _ {i} \cdot \mathbf {u} _ {j}}{\| \mathbf {u} _ {i} \| _ {2} \| \mathbf {u} _ {j} \| _ {2}} \tag {5} +$$ + +Then the local density $\rho_{i}$ is calculated as: + +$$ +\rho_ {i} = \sum_ {j} e ^ {- d _ {i j} / d _ {n}} \tag {6} +$$ + +We process the $\mathbf{u}_i$ vectors from highest to lowest $\rho$ values and designate a vector as a cluster center if its minimum distance from already selected centers is greater than $d_c$ . Each vector $\mathbf{u}_i$ is then assigned to the cluster of the closest center. 
Our algorithm guarantees that the distance from each vector to its cluster center is less than $d_c$ , thereby satisfying condition (b) stated above. The full DBDPC algorithm is detailed in Algorithm 2. The center identification process in DBDPC ensures that inter-cluster distances are upper-bounded by $2d_c \times (2 - d_c)$ while distances between cluster centers are lower-bounded by $d_c$ , which we formally prove in Appendix B. We note that several parts of our algorithm are presented as for-loops for clarity. However, all computations are parallelizable on GPU, as there are no dependencies between the elements of each loop, except for the part where we select cluster centers. For this part, we use a recursive algorithm that efficiently identifies an initial set of centers and discarded vectors, thereby reducing the number of vectors to be processed. We explain this in detail in Appendix D. For a comparison between DBDPC and DPC, as well as a qualitative comparison with other clustering algorithms, refer to Appendix C. + +Which vectors should be used for distance calculation? + +As previously discussed, the DBDPC algorithm operates on a set of vectors that are used for distance calculation. To achieve effective clustering, the dot product between these vectors needs to accurately reflect the similarity between the corresponding visual tokens. Fortunately, transformers address this issue through the QKV self-attention mechanism. 
+ +# Algorithm 2 DBDPC + +Input: Cutoff distance $d_{c} \in \mathbb{R}^{+}$ , normalization factor $d_{n} \in \mathbb{R}^{+}$ , set of vectors $\{\mathbf{u}_i \in \mathbb{R}^{d_1}\}_{i=1}^q$ + +Output: Cluster center indices $C_{\text{centers}}$ , element indices in each cluster $C_{\text{elements}}$ + +for all pairs $(\mathbf{u}_i,\mathbf{u}_j)$ do + +$$ +d _ {i j} = 1 - \frac {\mathbf {u} _ {i} \cdot \mathbf {u} _ {j}}{\| \mathbf {u} _ {i} \| _ {2} \| \mathbf {u} _ {j} \| _ {2}} +$$ + +end for + +for all vectors $\mathbf{u}_i$ do + +$$ +\rho_ {i} = \sum_ {j = 1} ^ {q} e ^ {- d _ {i j} / d _ {n}} +$$ + +end for + +Sort vectors by $\rho_{i}$ in descending order, obtaining indices $[i_1,i_2,\dots ,i_q]$ + +Initialize $C_{\mathrm{centers}} = \{i_1\}$ $C_{\mathrm{elements}} = \{i_1:\emptyset \}$ + +for all indices $i_k$ in sorted order do + +if $\min_{s\in C_{\mathrm{centers}}}d_{i_ks} > d_c$ then + +$$ +C _ {\text {c e n t e r s}} = C _ {\text {c e n t e r s}} \cup \left\{i _ {k} \right\} +$$ + +$$ +C _ {\text {e l e m e n t s}} \left[ i _ {k} \right] = \emptyset +$$ + +end if + +end for + +for all indices $i$ do + +$s_i = \text{argmin}_{s \in C_{\text{centers}}} d_{is}$ + +$$ +C _ {\text {e l e m e n t s}} [ s _ {i} ] = C _ {\text {e l e m e n t s}} [ s _ {i} ] \cup \{i \} +$$ + +end for + +Return $C_{\mathrm{centers}}$ $C_{\mathrm{elements}}$ + +Specifically, the key vectors $K$ provide a meaningful representation of each token, tailored for dot product similarity. Therefore, we will use the key vectors in the DBDPC algorithm. Formally, we have: + +$$ +C _ {\text {c e n t e r s}}, C _ {\text {e l e m e n t s}} = \mathrm {D B D P C} \left(K ^ {\prime}\right) \tag {7} +$$ + +where $K' = \{\mathbf{u}_i \in K \mid i \in S_{\text{important}}\}$ is the subset of keys consisting of elements with indices in $S_{\text{important}}$ . + +What about unimportant tokens near cluster centers? 
Tokens initially deemed unimportant but close enough to cluster centers have a high probability of being mislabeled. We add these tokens to the corresponding cluster to limit information loss. Formally, we define a threshold based on a coefficient $\alpha$ , where any token $\mathbf{u}_i$ , initially excluded, is added to the cluster of the closest center $s \in C_{\text{centers}}$ if its distance to the center satisfies $d_{is} < \alpha \cdot d_c$ . Specifically, the new cluster elements set $C_{\text{elements}}^{(s)}$ is updated as follows: + +$$ +S _ {\text {a d d e d}} ^ {(s)} = \left\{i \in S _ {\text {u n i m p o r t a n t}} \mid s = \operatorname {a r g m i n} _ {s ^ {\prime} \in C _ {\text {c e n t e r s}}} d _ {i s ^ {\prime}} \right. \tag {8} +$$ + +and $d_{is} < \alpha \cdot d_c\}$ + +$$ +C _ {\text {e l e m e n t s}} ^ {(s)} \leftarrow C _ {\text {e l e m e n t s}} ^ {(s)} \cup S _ {\text {a d d e d}} ^ {(s)} \tag {9} +$$ + +Merging Finally, the hidden states corresponding to the elements in each cluster are merged. Formally, the merged + +# Algorithm 3 PACT + +Input: Hidden states $\mathbf{H} = [\mathbf{h}_1, \dots, \mathbf{h}_n] \in \mathbb{R}^{n \times d}$ ; key and query matrices $\mathbf{K}, \mathbf{Q} \in \mathbb{R}^{n \times n_h \times d_h}$ ; position IDs $\mathbf{P} = [p_1, \dots, p_n]$ ; pruning percentage $\lambda \in [0, 1]$ ; cutoff distance $d_c > 0$ ; tolerance coefficient $\alpha > 0$ +Output: Merged hidden states $\mathbf{H}'$ ; new position IDs $\mathbf{P}'$ +Step 1: Identify important and unimportant tokens +$S_{\mathrm{important}}$ $S_{\mathrm{unimportant}}\gets \mathrm{EUTI}(\mathbf{H},\mathbf{K},\mathbf{Q},p)$ +Step 2: Cluster important tokens with DBDPC +$\mathbf{K}^{\prime}\gets \{\mathbf{k}_{i}\in \mathbf{K}\mid i\in S_{\mathrm{important}}\}$ +$C_{\mathrm{centers}}$ $C_{\mathrm{elements}}\gets \mathrm{DBDPC}(\mathbf{K}^{\prime},d_{c})$ +Step 3: Assign unimportant tokens to sufficiently close clusters. 
+for all $i\in S_{\mathrm{unimportant}}$ do +$s_i\gets argmin_s d_{is}$ +if $d_{isi} < \alpha .d_c$ then +$C_{\mathrm{elements}}^{(s_i)} \gets C_{\mathrm{elements}}^{(s_i)} \cup \{i\}$ +end if +end for +Step 4: Merge hidden states and assign position IDs +for all $s\in C_{\mathrm{centers}}$ do +$\mathbf{h}_s^{\prime}\gets \frac{1}{|C_{\mathrm{elements}}^{(s)}|}\sum_{i\in C_{\mathrm{elements}}^{(s)}}\mathbf{h}_i$ +$p_s^\prime \gets p_s$ +end for +Return H', P' + +hidden states are computed as: + +$$ +\mathbf {H} ^ {\prime} = \left\{\frac {1}{| C _ {\text {e l e m e n t s}} ^ {(j)} |} \sum_ {i \in C _ {\text {e l e m e n t s}} ^ {(j)}} \mathbf {h} _ {i} \mid C _ {\text {e l e m e n t s}} ^ {(j)} \in C _ {\text {e l e m e n t s}} \right\} \tag {10} +$$ + +Defining the position IDs Accurately assigning position IDs to each vector in the new hidden states $\mathbf{H}^{\prime}$ is crucial, especially for models using Rotary embeddings, as these IDs determine the input image structure or the temporal dependencies of the input video. In order to achieve a low statistical discrepancy compared to regular inference, we assign the position ID for each vector from $H^{\prime}$ as its corresponding cluster center. The full PACT pipeline is shown in Algorithm 3. When Rotary Embeddings are used, DBDPC uses the keys after these embeddings are applied, whereas EUTI uses the keys and queries before applying these embeddings. For clarity, we omit this detail in Algorithm 3. We also note that both DBDPC and EUTI, as well as PACT, do not use textual tokens. Therefore, visual token reduction is performed independently of the textual context, making our method well-suited for multi-turn conversations. + +Proportional attention Merging tokens reduces their influence in the attention mechanism and can therefore deteriorate performance if many important tokens are merged together. To mitigate this, we employ proportional attention. 
+ +![](images/d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg) +Figure 5. Comparison between PACT, DBDPC, and EUTI against other visual token reduction methods across various reduction ratios applied on LLaVA-OneVision-7B. + +![](images/dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg) + +Let $K$ , $Q$ , and $V$ denote the keys, queries, and values at a layer $L'$ , where $L' \geq L$ . For each attention head $j$ , the attention scores are calculated as follows: + +$$ +A ^ {(j)} = \operatorname {s o f t m a x} \left(\frac {Q ^ {(j)} K ^ {(j) \top}}{\sqrt {d _ {l ^ {\prime}}}} + \log \mathbf {W} + \mathbf {B}\right) \tag {11} +$$ + +where $d_{l'}$ is the dimensionality of the query for each attention head. Here, $\mathbf{W}$ is a matrix representing the weight of each token, and $\mathbf{B}$ is the attention mask. Specifically, for visual tokens, $w_{i_0,i_1}$ represents the size of the cluster corresponding to token $i_1$ , for any value of $i_0$ . For each textual token at position $t$ , $w_{i_0,t} = 1$ , as they remain unmerged, retaining a weight of one. By scaling the attention scores based on $\mathbf{W}$ , the model effectively treats each visual token as if it represents multiple tokens. We note that when using proportional attention, we use PyTorch's scaled dot-product attention, which produces similar results to the official FlashAttention implementation while supporting custom masks. + +Selecting the layer $L$ for token reduction: To ensure maximum computational gain, we must choose an early layer $L$ for visual token reduction. However, we also require that the keys at the selected layer are not too similar, allowing for effective clustering and pruning. Thus, we select the earliest layer where the maximum distance between keys is sufficiently high. 
Figure 4 shows that in the initial layers of LLaVA-OneVision-7B, the keys corresponding to visual tokens are quite similar, indicating a lack of distinctive features necessary for effective pruning and clustering. + +# 4. Experiments + +# 4.1. Evaluation datasets + +We evaluate the effectiveness of PACT using diverse benchmarks, similar to those used for LLaVA-OneVision-7B, covering single-image, multi-image, and video tasks. We use AI2D [22], TextVQA [45], ChartQA [37], DocVQA [38], and InfographicVQA [39] to assess PACT's ability to reduce visual tokens while maintaining performance + +![](images/bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg) +Figure 6. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on Qwen2-VL-7B-Instruct. + +in text-rich documents. To test reasoning across multiple disciplines, we use MME [15], MMBench [32], MMVet [51], MathVerse [57], MathVista [34], MMMU [53], MMStar [8], and ScienceQA [33]. Additionally, Vibe-Eval [40], MM-LiveBench [26], and LLaVA-Bench-Wilder [25] evaluate its robustness in real-world scenarios and visual chat contexts. We use LLaVA-Interleave Bench [25] and MuirBench [48] to examine PACT's efficiency in token reduction while preserving inter-image reasoning. To assess performance in video comprehension tasks, we use ActivityNet-QA [52], MLVU [58], VideoMME [16], EgoSchema [36], and PerceptionTest [41]. Finally, Video-ChatGPT [35] evaluates the method's effectiveness in dialogue-based video interaction. + +# 4.2. Evaluation setup + +In our comparison, we include approaches where the reduction is applied at a single layer, similar to PACT, such as FastV and clustering-based visual token reduction. For these approaches, we refer to the reduction ratio as the relative reduction in the number of visual tokens, defined as $1 - \frac{\text{number of visual tokens after reduction}}{\text{number of visual tokens before reduction}}$ . 
For all these approaches, we use the same value of $L$ and vary hyperparameters to test across different reduction ratios. For methods that use progressive token reduction, like ToME [6], or apply reduction after the visual encoder, as PruMerge and HiReD, or when the reduction ratio cannot be controlled at a fixed + +![](images/034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg) +Figure 7. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on InternVL2-8B. + +layer, such as VTW, we adjust the parameters of these approaches to achieve the same average number of visual tokens across all layers as the one-layer reduction methods for a given reduction ratio. When evaluating clustering algorithms for visual token reduction, we apply proportional attention, as it consistently improves performance across all clustering algorithms, especially at high reduction ratios. Additionally, it is crucial to correctly assign position IDs to the resulting reduced set of visual tokens. Details on the assignment strategy are presented in Appendix E. When reporting processing time or throughput, we take into account the total time required by both the language model and the reduction algorithm per input element. In the next section, we base our comparison on a metric called the Approach-to-Reference Metric Ratio, defined as the average of the ratio of the metric of the tested approach to the metric obtained without visual token reduction across all test datasets. Formally we have Approach-to-Reference Metric Ratio $= \frac{1}{N} \sum_{i=1}^{N} \frac{\text{Metric with reduction}(i)}{\text{Metric no reduction}(i)}$ where $N$ is the total number of test datasets. This metric indicates how much of the original model capacity is retained. 
It is important to note that when using ToME for visual token reduction, a reduction ratio greater than 50% can't be achieved if the number of visual tokens is reduced by a fixed amount in each layer, as suggested in [6]. Instead, we use a scheduler to achieve higher reduction ratios, which we explain in Appendix F. More details on the hyperparameters used for evaluating PACT are provided in Appendix G. We follow the same dataset splits and metrics used for evaluating LLaVA-OneVision wherever feasible. More details are provided in Appendix H. Note that all experiments were conducted on a single A100 GPU. + +# 4.3. Results + +We compare PACT with FastV [9], VTW [30], ToME [6], PruMerge [44] and HiRED [3] on LLaVA-OneVision7B, InternVL2-8B, Qwen2-VL-7B-Instruct and LLaVA1.6-Mistral-7B. Since HiRED and PruMerge are only applicable to LLaVA-1.6, we exclude them from other comparisons. As shown in figures 5, 6, 7, and 8 PACT con + +![](images/935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg) +Figure 8. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on LLaVA-1.6-Mistral-7B. + +Table 2. Comparison of PACT with FastV, VTW, and ToME on LLaVA-OneVision-7B. Algo. Time refers to the average time the algorithm takes per input element, measured in seconds. Proc. Time refers to the average time taken by both the language model and the reduction algorithm per input element. Red. Ratio stands for average Reduction Ratio. The Algo. Time for VTW is nearly zero, and thus omitted. The different visual token reduction methods are evaluated at the same reduction ratio as PACT. + +
DatasetNo reductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeMetricProc. TimeAlgo. Time
VideoMME58.50.79257.665.6%0.3690.02157.00.3710.04046.90.29657.00.4170.091
MME15790.5541564.070.2%0.2430.0171576.00.2440.016842.00.2311556.90.3170.084
DocVQA87.21.08884.467.9%0.5190.02684.30.5240.05110.50.44961.90.5760.099
MLVU65.20.79564.766.4%0.3610.02262.90.3690.04054.40.31263.40.4170.092
LLaVA-Interleave64.10.24964.069.7%0.1330.01058.90.1390.00732.40.12350.30.1920.068
ChartQA79.90.67176.568.5%0.3410.01977.00.3420.01616.60.30763.40.4020.082
MMBench80.60.24980.369.3%0.1350.01079.00.1400.00552.40.12579.70.1930.066
MuirBench42.00.38443.167.8%0.1780.01340.40.1780.00934.90.16240.50.2330.072
ScienceQA95.90.23893.869.6%0.1330.01091.60.1370.00680.00.12493.80.1900.066
MMMU49.20.13948.970.4%0.1040.00748.90.1060.00343.50.09348.60.1240.062
AI2D81.50.38281.069.8%0.1860.01379.40.1910.01469.70.17779.70.2440.073
InfographicVQA66.00.89561.964.7%0.4810.02358.60.4830.04024.50.40848.30.6070.130
MMStar62.00.29760.169.7%0.1470.01158.60.1520.00737.20.16560.10.2290.069
ActivityNetQA54.50.92155.170.0%0.4190.02953.70.4250.04236.60.39454.10.5130.203
MM-LiveBench73.14.43471.767.5%3.2120.04764.43.2210.04441.03.08064.23.6070.102
LLaVA-Wilder71.010.1071.570.0%8.2620.03571.08.2630.02548.87.51568.07.9260.085
MathVerse16.80.83116.674.2%0.3610.02116.10.3820.03617.60.30116.50.5590.150
MathVista63.30.44062.070.7%0.2710.01559.50.2720.01638.50.26055.00.3380.071
MMVet58.04.60258.470.4%3.7930.03551.73.7950.03615.73.65247.24.1150.212
Vibe-Eval41.65.15339.171.1%3.7090.03238.23.7140.04712.33.55031.24.3170.095
VideoChatGPT3.252.9723.2567.2%1.8630.0293.221.8660.0401.921.3203.191.9750.205
EgoSchema60.10.81160.166.6%0.3510.02158.70.3530.04444.80.29759.80.3910.091
PerceptionTest52.10.80152.366.9%0.3530.02351.70.3570.04045.00.29651.10.3930.090
TextVQA75.80.69075.067.2%0.3320.02375.50.3360.02911.60.28762.50.3920.087
sistently outperforms other methods
The graphs reveal that DBDPC consistently outperforms other clustering algorithms for visual token reduction, exhibiting less performance degradation at equal reduction ratios and demonstrating improved computational efficiency, leading to better throughput. These results validate our hypothesis that, for an effective visual token reduction, it is necessary to ensure that the distances between elements within each cluster do not exceed a predefined threshold. Fig. 5 also shows that EUTI consistently outperforms FastV at equal reduction ratios and is less costly, as it does not require the computation of attention scores. In addition, unlike FastV, EUTI does not introduce a GPU memory overhead1. We provide additional numerical results in Appendix I. + +# 4.4. Ablation study + +Fig. 5 shows that PACT consistently outperforms both DBDPC and EUTI across various reduction ratios. This confirms that combining clustering and pruning techniques + +Table 3. Comparison of PACT with FastV, VTW, and ToME applied on Qwen2-VL-7B-Instruct across Various Datasets. + +
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1654.50.2381666.586.3%0.1101500.00.111709.240.1201610.90.140
DocVQA93.90.51690.577.5%0.29486.60.2988.50.24942.90.350
TextVQA81.80.15580.467.5%0.13279.90.13513.20.11866.20.151
InfographicVQA74.60.47870.669.7%0.27863.30.27321.50.22543.90.299
ChartQA80.80.14576.061.1%0.13569.20.13412.90.12355.10.155
MMBench77.60.07477.151.5%0.07777.10.07476.90.07375.90.080
MuirBench40.70.15941.276.9%0.11340.40.11237.90.11175.80.125
MMMU51.40.10951.272.6%0.09349.30.09245.40.08848.90.105
AI2D79.90.10578.464.2%0.09676.20.09769.00.08776.40.115
MMStar56.00.07254.861.3%0.07251.50.06740.30.06553.80.077
EgoSchema62.10.36061.660.0%0.20760.20.21246.30.19061.20.230
MathVerse25.30.62024.582.2%0.39323.70.39613.90.29618.10.651
MathVista59.20.24957.773.3%0.19556.40.19436.80.16553.50.275
MM Vet24.94.70025.180.3%3.82022.33.8302.73.65016.74.780
Vibe-Eval47.53.20046.185.0%2.31044.32.37513.11.99329.63.620
LLaVA-Interleave35.90.12035.573.7%0.10034.70.10133.20.09635.30.125
MM-LiveBench72.63.97070.777.1%3.04063.03.12039.72.97057.64.450
+ +yields better performance than using each approach independently, as the combined method addresses both visual tokens irrelevance and redundancy. We ablate several components of the DBDPC algorithm and present the results in Fig. 10. First, we ablate token merging by selecting the center of each cluster as the representative token instead of merging tokens within each cluster. We also ablate the use of proportional attention. Additionally, we ablate the assignment of position IDs to the reduced set of tokens and experiment with two alternatives: using the mean of position IDs of all elements in each cluster and assigning position IDs sequentially after reordering the reduced set according to the mean of position IDs. Finally, we ablate the use of key vectors in the clustering process and instead use hidden states. Our results show that each ablated component contributes positively to the performance of the DBDPC algorithm. Notably, correctly assigning position IDs to the reduced set is crucial, as these position IDs reflect the structure of input images and the temporal order of input videos. Additionally, proportional attention proves effective at higher reduction ratios, while token merging en + +![](images/f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg) +Figure 10. Ablation study of DBDPC and EUTI on LLaVA-OneVision-7B. + +![](images/991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg) + +hances performance once the reduction ratio exceeds $50\%$ . The figure also confirms that keys are better suited for cosine similarity-based distance calculations, as they are naturally used in dot products within the attention mechanism. We perform two separate ablations on Eq. (2) of the EUTI algorithm. The first ablation removes the use of hidden state norms, while the second ablates the use of the global query, which corresponds to using only the hidden state norms. The results in Fig. 
10 show that combining both the global query-based score and the norm of hidden states consistently leads to better results than using either metric alone, suggesting that they provide complementary information about the importance of each visual token. Finally, we ablate the pruned token recovery module in PACT by setting $\alpha$ to zero, with results presented in Fig. 11. The plot shows that reintegrating visual tokens initially deemed unimportant but close enough to a cluster center consistently enhances performance across different reduction ratios, supporting our hypothesis that these tokens were likely mislabeled by the EUTI module. Figure 11 also shows the effect of the choice of the reduction layer on PACT's performance, demonstrating the effectiveness of our reduction layer identification approach. We provide additional numerical results in Appendix J. + +![](images/a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg) +Figure 11. Ablation study of PACT on LLaVA-OneVision-7B. + +![](images/1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg) + +# 5. Conclusion + +In this work, we presented PACT, a method that addresses both visual token irrelevance and redundancy. PACT is a plug-and-play solution that does not require additional training. It does not rely on textual tokens for visual token reduction, making it well-suited for multi-turn conversations. Additionally, it operates independently of the visual encoder and connector architecture, making it broadly applicable across various Visual Language Models. Our results confirm that the number of visual tokens in Visual Language Models is unnecessarily large and provide valuable insights for effective token reduction. This opens the door for future work in designing more efficient connectors and architectures for VLMs. + +# 6. Acknowledgments + +This work received financial support from Crédit Agricole S.A. 
through the research chair with Ecole Polytechnique on Trustworthy and Responsible AI. This work was granted access to the HPC resources of IDRIS under the allocation 2024-AD011014793R1 made by GENCI. + +# References + +[1] Marcel R Ackermann, Johannes Blömer, Daniel Kuntze, and Christian Sohler. Analysis of agglomerative clustering. *Algorithmica*, 69:184-215, 2014. 8, 3 +[2] Mohiuddin Ahmed, Raihan Seraj, and Syed Mohammed Shamsul Islam. The k-means algorithm: A comprehensive survey and performance evaluation. Electronics, 9(8):1295, 2020. 5, 8, 3 +[3] Kazi Hasan Ibn Arif, JinYi Yoon, Dimitrios S Nikolopoulos, Hans Vandierendonck, Deepu John, and Bo Ji. Hired: Attention-guided token dropping for efficient inference of high-resolution vision-language models in resource-constrained environments. arXiv preprint arXiv:2408.10945, 2024. 2, 7 +[4] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1 +[5] Panthadeep Bhattacharjee and Pinaki Mitra. A survey of density based clustering algorithms. Frontiers of Computer Science, 15:1-27, 2021. 5, 8, 3 +[6] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. arXiv preprint arXiv:2210.09461, 2022. 2, 7, 4 +[7] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 2 +[8] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, + +Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 7 +[9] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. 
An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. arXiv preprint arXiv:2403.06764, 2024. 2, 3, 7 +[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhang-wei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 2 +[11] Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023. 1 +[12] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness, 2022. 1, 3 +[13] Mohamed Dhouib, Ghassen Bettaieb, and Aymen Shabou. Docparser: End-to-endOCR-free information extraction from visually rich documents. In International Conference on Document Analysis and Recognition, pages 155-172. Springer, 2023. 2 +[14] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 5, 8, 3 +[15] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 7 +[16] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 7 +[17] Tao Gong, Chengqi Lyu, Shilong Zhang, Yudong Wang, Miao Zheng, Qian Zhao, Kuikun Liu, Wenwei Zhang, Ping Luo, and Kai Chen. 
Multimodal-gpt: A vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790, 2023. 2 +[18] Jiaming Han, Kaixiong Gong, Yiyuan Zhang, Jiaqi Wang, Kaipeng Zhang, Dahua Lin, Yu Qiao, Peng Gao, and Xiangyu Yue. Onellm: One framework to align all modalities with language. arXiv preprint arXiv:2312.03700, 2023. 1 +[19] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 1 +[20] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2 + +[21] Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaogiang Lei, et al. Unified language-vision pretraining with dynamic discrete visual tokenization. arXiv preprint arXiv:2309.04669, 2023. 2 +[22] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European conference on computer vision, pages 235-251. Springer, 2016. 6 +[23] Geewook Kim, Teakgyu Hong, Moonbin Yim, JeongYeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, and Seunghyun Park. Ocr-free document understanding transformer. In Computer Vision – ECCV 2022, pages 498–517, Cham, 2022. Springer Nature Switzerland. 2 +[24] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 4 +[25] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, 2024. 
7 +[26] Bo Li, Peiyuan Zhang, Kaichen Zhang, Fanyi Pu, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimodal models, 2024. 7, 4, 5 +[27] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 2, 4, 5 +[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 2 +[29] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2 +[30] Zhihang Lin, Mingbao Lin, Luxi Lin, and Rongrong Ji. Boosting multimodal large language models with visual tokens withdrawal for rapid inference. arXiv preprint arXiv:2405.05803, 2024. 2, 7 +[31] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 1, 2 +[32] Yuan Liu, Haodong Duan, Yuanhan Zhang, Songyang Zhang Bo Li, and Wangbo Zhao. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281, 2023. 7 +[33] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022. 7 +[34] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In In + +ternational Conference on Learning Representations (ICLR), 2024. 7 +[35] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. 
Video-chatgpt: Towards detailed video understanding via large vision and language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024), 2024. 7 +[36] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36:46212-46244, 2023. 7 +[37] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 6 +[38] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 6 +[39] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 6 +[40] Piotr Padlewski, Max Bain, Matthew Henderson, Zhongkai Zhu, Nishant Relan, Hai Pham, Donovan Ong, Kaloyan Aleksiev, Aitor Ormazabal, Samuel Phua, Ethan Yeo, Eugenie Lamprecht, Qi Liu, Yuqi Wang, Eric Chen, Deyu Fu, Lei Li, Che Zheng, Cyprien de Masson d'Autume, Dani Yogatama, Mikel Artetxe, and Yi Tay. Vibe-eval: A hard evaluation suite for measuring progress of multimodal language models. arXiv preprint arXiv:2405.02287, 2024. 7 +[41] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024. 
7 +[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 2 +[43] Erich Schubert. A Triangle Inequality for Cosine Similarity, page 32-44. Springer International Publishing, 2021. 1 +[44] Yuzhang Shang, Mu Cai, Bingxin Xu, Yong Jae Lee, and Yan Yan. Llava-prumerge: Adaptive token reduction for efficient large multimodal models. arXiv preprint arXiv:2403.15388, 2024. 2, 7 +[45] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 6 +[46] Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multimodal llms. arXiv preprint arXiv:2409.10994, 2024. 2 + +[47] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 4 +[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 7 +[49] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 2 +[50] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. 
mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2 +[51] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023. 7 +[52] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, pages 9127–9134, 2019. 7 +[53] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Ren-liang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024. 7 +[54] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training, 2023. 2 +[55] Duzhen Zhang, Yahan Yu, Chenxing Li, Jiahua Dong, Dan Su, Chenhui Chu, and Dong Yu. Mm-llms: Recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601, 2024. 1 +[56] Hang Zhang, Xin Li, and Lidong Bing. Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023. 1 +[57] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, and Hongsheng Li. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems?, 2024. 7 +[58] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 
7 + +# PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models Supplementary Materials + +# A. On the density peaks clustering algorithm + +Density Peak Clustering (DPC) is a clustering algorithm that identifies cluster centers based on local density and the distance to points with higher density, denoted as $\delta_{i}$ . The density, $\rho_{i}$ , can be measured by counting the number of points within a cutoff distance $d_{c}$ from $\mathbf{u}_{i}$ , or by using a Gaussian function where nearby points contribute more to the density, $\rho_{i} = \sum_{j}\exp \left(-\left(\frac{d_{ij}}{d_c}\right)^2\right)$ . Points with high $\rho_{i}$ and $\delta_{i}$ values are selected as cluster centers. This selection can be done by defining a threshold $t$ and designating points as cluster centers where $\rho_{i}\cdot \delta_{i}\geq t\times \max (\rho_{i}\cdot \delta_{i})$ , or by selecting a fixed percentage. Other points are then assigned to the cluster of the nearest higher-density point, iterating from the highest to the lowest density. This process can create clusters of varying shapes, where the maximum distance between elements within a cluster can be extremely large. In extreme cases, the two farthest points in the input data can end up in the same cluster. + +# B. DBDPC Characteristics + +This section aims to prove that DBDPC guarantees that: Each element's distance to its assigned cluster center is at most $d_{c}$ and that all cluster centers are at least $d_{c}$ apart. + +Assume, for contradiction, that at least one of the following statements is false: + +1. There exists an element $i$ assigned to a cluster such that its distance to the cluster center is greater than $d_{c}$ , i.e., $d_{is} > d_{c}$ . +2. There exist two cluster centers $s_1, s_2$ such that their pairwise distance is at most $d_c$ , i.e., $d_{s_1s_2} \leq d_c$ . 
+ +Contradiction for Assumption 1 In DBDPC, each element $i$ is assigned to its closest cluster center: + +$$ +s_{i} = \arg \min_{s\in C_{\text{centers}}}d_{is}. +$$ + +If $d_{is} > d_c$ for the closest center $s$ , then we have $d_{is'} > d_c$ for all centers $s'$ . However, in the DBDPC selection process, an element is assigned as a cluster center if its minimum distance to already selected centers is over $d_c$ . Thus, $i$ should have been selected as a new cluster center, and its distance to the closest cluster center would be zero, which leads to a contradiction, proving that every element satisfies $d_{is} \leq d_c$ . + +Contradiction for Assumption 2 Assume, without loss of generality, that $s_2$ is chosen after $s_1$ . By the center selection criterion, a new center $s_2$ is added only if: + +$$ +\min_{s\in C_{\text{centers}}}d_{s_{2}s} > d_{c}. +$$ + +If $d_{s_1 s_2} \leq d_c$ , then $s_2$ shouldn't be selected as a cluster center, which leads to a contradiction. Thus, no two centers can be closer than $d_c$ . + +Intra-cluster distance upper-bound: Here we will refer to cosine similarity by sim. Let $x$ and $y$ be two points in the same cluster, and $s$ their cluster center. Since each point $\mathbf{x}$ is within $d_c$ of its cluster center $\mathbf{s}$ and the distance used in the DBDPC algorithm is $1 - \mathrm{sim}$ , we have $\mathrm{sim}(\mathbf{x},\mathbf{s})\geq 1 - d_c$ . We have from [43]: + +$$ +\operatorname{sim}(\mathbf{x},\mathbf{y}) \geq \operatorname{sim}(\mathbf{x},\mathbf{s}) \cdot \operatorname{sim}(\mathbf{s},\mathbf{y}) + m - 1, +$$ + +$$ +\text{where } m = \min \left\{\operatorname{sim}(\mathbf{x},\mathbf{s})^{2}, \operatorname{sim}(\mathbf{s},\mathbf{y})^{2} \right\}. 
+$$ + +Using $\mathrm{sim}(\mathbf{x},\mathbf{s}),\mathrm{sim}(\mathbf{s},\mathbf{y})\geq 1 - d_c$ we get + +$$ +\operatorname{sim}(\mathbf{x},\mathbf{y}) \geq (1 - d_{c})^{2} + (1 - d_{c})^{2} - 1 = 1 - 2 d_{c} (2 - d_{c}). +$$ + +Finally, converting this back to the distance $d(\mathbf{x}, \mathbf{y}) = 1 - \mathrm{sim}(\mathbf{x}, \mathbf{y})$ , we obtain: + +$$ +d(\mathbf{x},\mathbf{y}) \leq 2 d_{c} (2 - d_{c}). +$$ + +Therefore, the intra-cluster distance in the DBDPC algorithm is bounded by $2d_{c}(2 - d_{c})$ . + +# C. A comparison between DBDPC and other clustering algorithms + +Comparison between DBDPC and DPC: We note that, aside from using densities, DBDPC is fundamentally different from DPC. Please refer to Appendix A for a detailed explanation of the DPC algorithm. The center identification process in DBDPC results in two main characteristics with formal proof detailed in Appendix B. First, the distance between each element and its cluster center is below $d_{c}$ , which leads to intra-cluster distances being upper-bounded by $2d_{c} \times (2 - d_{c})$ . Additionally, the distance between cluster centers is lower-bounded by $d_{c}$ . These guarantees do not hold for DPC, leading to two drawbacks. Since intra-cluster distances are not controlled, merging these vectors may result in merging highly dissimilar vectors, leading to information loss. Also, in high-density regions, the distance between cluster centers becomes too small, making DPC ineffective in addressing information redundancy. 
+ +A Qualitative comparison Figure 12 presents the clustering results for DBDPC, DPC, DBSCAN, and K-Means on a + +Algorithm 4 Recursive Center Identification for DBDPC with Iterative Center Identification +```txt +Input: Cutoff distance $d_{c}\in \mathbb{R}^{+}$ , set of vectors $\mathbf{U} = \{\mathbf{u}_i\in$ $\mathbb{R}^{d_l}\}_{i = 1}^n$ , density values $\{\rho_i\}_{i = 1}^n$ , distance matrix $D =$ $[d_{ij}]$ , fallback threshold $T > 0$ +``` + +```txt +Output: Cluster center indices $C_{\text{centers}}$ Initialize cluster center set $C_{\text{centers}} =$ +``` + +Set the density of each point : + +$$ +\rho_ {i} = \mathrm {a r g s o r t} \big (\{- \rho_ {j} \} _ {j = 1} ^ {n} \big) [ i ] +$$ + +while $\mathbf{U}\neq \emptyset$ do + +Compute $\delta_{i}$ for all vectors $\mathbf{u}_i\in \mathbf{U}$ + +$$ +\delta_ {i} = \min _ {\rho_ {j} > \rho_ {i}} d _ {i j} +$$ + +Select cluster candidates: + +$$ +\mathbf {C} _ {\text {n e w}} = \left\{\mathbf {u} _ {i} \in \mathbf {U} \mid \delta_ {i} > d _ {c} \right\} +$$ + +$C_{\mathrm{centers}} \gets C_{\mathrm{centers}} \cup \mathbf{C}_{\mathrm{new}}$ + +Update remaining vectors: + +$$ +\mathbf {U} \leftarrow \mathbf {U} \backslash \left(\mathbf {C} _ {\text {n e w}} \cup \left\{\mathbf {u} _ {k} \in \mathbf {U} \mid \begin{array}{c} \exists \mathbf {u} _ {i} \in \mathbf {C} _ {\text {n e w}} \\ \text {s u c h t h a t} d _ {i k} \leq d _ {c} \end{array} \right\}\right) +$$ + +if $|\mathbf{C}_{\mathrm{new}}| < T$ then + +Order remaining vectors $\mathbf{U}$ by decreasing $\rho_{i}$ : + +$\mathbf{U}\gets \mathrm{Sort}(\mathbf{U},\mathrm{key} = \rho_{i},\mathrm{order} = \mathrm{descending})$ + +Call Iterative Center Identification: + +$C_{\mathrm{centers}} \gets$ IterativeCenterIdentification( $C_{\mathrm{centers}}$ , $\mathbf{U}$ , $d_c$ ) + +return $C_{\mathrm{centers}}$ + +end if + +end while + +return $C_{\mathrm{centers}}$ + +# Function: Iterative Center Identification + +Inputs: Remaining vectors $\mathbf{U}$ 
(ordered by $\rho_{i}$ ), current cluster center set $C_{\mathrm{centers}}$ , cutoff distance $d_{c}$ + +Outputs: Updated cluster center indices $C_{\text{centers}}$ + +for all $\mathbf{u}_i\in \mathbf{U}$ do + +if $\min_{\mathbf{u}_s\in C_{\mathrm{centers}}}d_{is} > d_c$ then + +$C_{\mathrm{centers}} \gets C_{\mathrm{centers}} \cup \{\mathbf{u}_i\}$ + +end if + +end for + +return $C_{\mathrm{centers}}$ + +predefined set of two-dimensional points. The figure shows that only DBDPC and DBSCAN identify isolated points as distinct clusters, a crucial feature for visual token reduction, as these points contain unique and thus potentially valuable information. We note that, for DBSCAN, these isolated + +![](images/bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg) + +![](images/922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg) +Figure 12. An illustrative example of the difference in clustering characteristics between DBDPC and other clustering algorithms. Two-dimensional points and the Euclidean distance were used for illustration purposes. + +![](images/20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg) + +![](images/e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg) + +points may be identified as noise, depending on the chosen hyperparameters. Moreover, DBDPC partitions both the left and right groups of points into the same number of clusters, maintaining consistency despite the higher density on the left side. In contrast, DPC tends to form a greater number of clusters in high-density regions while creating large clusters in low-density areas, whereas DBSCAN follows the opposite pattern, producing large clusters in high-density regions. In the context of visual token reduction, merging points within these large clusters can result in information loss, leading to performance degradation and making DPC and DBSCAN less suitable than DBDPC for this task. We note that the results presented in Fig. 
12 for DPC and DBSCAN may change when modifying the hyperparameters; however, the characteristics discussed above persist across different hyperparameter choices. + +# D. Efficient center identification in DBDPC + +# D.1. A recursive approach + +To enhance the efficiency of the DBDPC algorithm, we introduce a recursive center identification method that reduces computational overhead while maintaining clustering accuracy. In the DBDPC algorithm, vectors are processed in descending order of their local densities $\rho_{i}$ , and a vector $\mathbf{u}_i$ is selected as a cluster center if it is farther than the cutoff distance $d_c$ from all previously selected centers. Implementing this as described in the algorithm requires sequentially iterating through all the vectors and checking distances to all previously selected centers, which does not fully leverage GPU parallelization capabilities. In the DBDPC algorithm, when two points have the same density, one is treated as if it has a higher density than the other, depending on the order of their processing. To replicate this behavior, we assign the + +density of each point to its rank as: + +$$ +\rho_ {i} = \operatorname {r a n k} _ {i} = \operatorname {a r g s o r t} \left(\left\{- \rho_ {j} \right\} _ {j = 1} ^ {n}\right) [ i ] +$$ + +Our accelerated method leverages the quantity $\delta_{i}$ , representing the minimum distance from vector $\mathbf{u}_i$ to any higher-density vector: + +$$ +\delta_ {i} = \min _ {\rho_ {j} > \rho_ {i}} d _ {i j} \tag {12} +$$ + +If $\delta_{i} > d_{c}$ , then $\mathbf{u}_{i}$ is selected as a cluster center because it is not within $d_{c}$ of any higher-density vector, which are the only potential cluster centers that can be selected before $d_{ij}$ in the DBDPC algorithm. 
In addition, any vector within $d_{c}$ of a cluster center identified using $\delta_{i}$ has a lower density than that center, as cluster centers identified using $\delta_{i}$ are not within $d_{c}$ of any higher-density vector. In the DBDPC algorithm, such a vector would not be chosen as a cluster center because it violates the distance condition relative to already selected centers. By identifying these vectors early, we can exclude them from further consideration as potential centers. We repeat this process recursively: after selecting cluster centers where $\delta_{i} > d_{c}$ and excluding vectors within $d_{c}$ of these centers, we process the remaining vectors. This recursion continues until the number of newly discovered cluster centers becomes small (e.g., less than 10). At that point, we fall back to the DBDPC method, processing the remaining vectors iteratively to ensure all potential centers are considered. This recursive approach reduces the number of iterations in the main loop and enhances parallelization, particularly on GPUs, by minimizing sequential computation. By leveraging $\delta_{i}$ and incorporating an early exclusion mechanism, the recursive center identification method reduces computational time while ensuring the same clustering results as the DBDPC algorithm. The recursive approach decreases the number of iterations and enhances GPU parallelization by minimizing sequential computation, making the algorithm more efficient for large datasets. The recursive center identification method is presented in Algorithm 4. We note that in practice this recursive approach reduces the computational time of the DBDPC algorithm by around 3 times. + +# D.2. 
Proof of correctness of the recursive approach + +To validate the correctness of the accelerated method, we demonstrate the following key points: selected centers are valid cluster centers, excluded vectors are not cluster centers and identifying remaining cluster centers is equivalent to identifying cluster centers on the reduced set. Proving these points suffices to establish correctness, as the remaining vectors after the recursive steps are treated the same as in the DBDPC algorithm. + +Selected Centers Are Valid Cluster Centers In the DB-DPC algorithm, for any vector $\mathbf{u}_i$ , only vectors with higher + +densities are considered for selection as cluster centers before $\mathbf{u}_i$ . If $\mathbf{u}_i$ is not within $d_c$ of any higher-density vector (i.e., $\delta_i > d_c$ ) then the distance of $\mathbf{u}_i$ from any previously selected center cannot exceed the cutoff distance $d_c$ . Consequently, $\mathbf{u}_i$ satisfies the condition for being a cluster center in the DBDPC algorithm, as it is farther than $d_c$ from all centers processed earlier. + +Excluded Vectors Are Not Cluster Centers Vectors within $d_{c}$ of a cluster center identified using $\delta_{i}$ have lower densities than that center, as these centers are not within $d_{c}$ to any higher density point. In the DBDPC algorithm, such vectors would not be selected as cluster centers because they are within $d_{c}$ to an already selected center, violating the distance condition. Therefore, excluding these vectors early does not affect the selection of valid cluster centers. + +Identifying Remaining Cluster Centers is Equivalent to Identifying Cluster Centers on the Reduced Set After selecting cluster centers where $\delta_{i} > d_{c}$ and excluding vectors within $d_{c}$ of these centers, we focus on the reduced set of remaining vectors for further processing. The critical observation is that the previously selected cluster centers are not within $d_{c}$ of any vector in the reduced set. 
This is ensured by the exclusion step, where all vectors within $d_{c}$ of these centers have been removed. Consequently, when identifying new cluster centers within the reduced set, we do not need to consider distances to the previously selected centers, as they cannot influence the selection due to their distance. Moreover, the vectors that have been excluded are not potential cluster centers themselves, meaning that they cannot influence the center selection process. This means that any vector satisfying $\delta > d_{c}$ in the reduced set is actually not within $d_{c}$ of any higher-density potential cluster center from the initial set, making it a cluster center. + +# E. On the choice of Positional IDs for clustering algorithms + +In our work, we benchmark four clustering algorithms: agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14]. For each algorithm, we use the key vectors for clustering, apply a cosine similarity-based distance (as in DBDPC), and evaluate two strategies: merging the hidden states within each cluster or selecting the cluster center as a representative token. We report the best-performing approach for each algorithm. Similar to DBDPC, we assign the position ID of the cluster center to the resulting vectors. However, apart from DPC, the other clustering algorithms do not explicitly provide a cluster center. For k-means and agglomerative clustering, we select the cluster center as the point closest to the average of all points in the cluster, using keys and cosine similarity. For DBSCAN, we experimented with choosing the point connected to the most other points within the cluster and found this approach to yield slightly better results, aligning
For a given layer $i$ , the process starts by splitting the tokens into two distinct sets, A and B. Each token in set A is matched with its most similar counterpart in set B, using cosine similarity based on key vectors to determine the closest pairs. The top $r_i$ pairs with the highest similarity are then selected for merging. Connected components from the matched pairs are combined into single vectors, where hidden states are averaged. It is important to note that each connected component contains exactly one element from set B, and when applying ToME to Visual Language Models, this element's position ID is assigned to the merged token. In [6], the number of visual tokens was reduced by a fixed quantity $(r_i = r)$ . However, this fixed reduction scheme cannot achieve more than a $50\%$ reduction unless no reduction is done at later layers when the number of tokens drops below $r$ , which goes against the gradual reduction strategy proposed in ToMe. To enable higher reduction ratios, we adopt a linearly decreasing scheduler, where the reduction is higher in early layers and decreases in later layers. This approach achieves a smaller average number of visual tokens across the network while still reducing the token count at each layer, allowing us to reach high reduction ratios effectively. + +# G. Implementation details and hyperparameters for PACT + +For all experiments on LLaVA-OneVision-7B, we set $d_{n} = 2$ , $\alpha = 1.5$ , and $L = 4$ . While the optimal values of each parameter may vary depending on the dataset, we aim to evaluate the real-world effectiveness of our approach by using consistent values across all testing datasets. The results in Tab. 2 were obtained using $d_{c} = 0.21$ and $\lambda = 0.55$ while those in Tab. 1 were obtained using $d_{c} = 0.17$ and $\alpha = 0.7$ . Additionally, to demonstrate the performance of our approach at different reduction ratios, we vary $d_{c}$ and $\lambda$ and report the results. 
The values of the fixed parameters $d_{n}$ and $\alpha$ were chosen by performing a grid search on SeedBench [24], which is why we do not include Seed-Bench in the testing datasets. It is important to note that finding the optimal parameters for all testing datasets is not the focus of this study, as this would require extensive testing of different values for $d_{c}$ , $\lambda$ , $L$ , $\alpha$ , and $d_{n}$ on all test sets. Such an approach would not accurately reflect the real-world performance of our method. Instead, we chose to only vary $d_{c}$ and $\lambda$ to evaluate the effectiveness of our approach at different reduction ratios. When + +testing on SeedBench, we found that a pruning ratio higher than $60\%$ harms performance. Therefore, we vary the pruning ratio between $10\%$ and $60\%$ and test across different values of $d_{c}$ . When testing PACT on LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. We use the same values of $d_{n}$ and $\alpha$ as when testing on LLaVA-OneVision-7B. We note that these hyperparameters may not be optimal; however, as we aim to test the generalizability of our approach, we opt to use the same hyperparameters across models. Figure 13, Figure 14 and Figure 15 show the maximum distance between the keys at several layers of the language model for LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. Following the same approach for LLaVA-OneVision-7B, we choose $L = 4$ for Qwen2-VL-7B-Instruct and $L = 7$ for InternVL2-8B. We note that the choice of the reduction layer for InternVL2-8B is not as evident as for LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct, as the increase in maximum distance from one layer to the next is sometimes minimal, making it unclear which layer offers the best balance between accuracy and computational efficiency. 
However, since we do not aim to experimentally determine the optimal reduction layer, we end up choosing $L = 7$ , as the maximum distance between keys is increased by an acceptable amount between the seventh and eighth layer. Following the same approach we use $L = 7$ for LLaVA-1.6-Mistral-7B. + +# H. More about test datasets and used metrics + +For evaluating the different approaches, we use LMMs-Eval [26] and aim to follow the same dataset splits and metrics as used in [27]. We detail the used splits and metrics in Tab. 4. Some datasets require evaluation using a GPT model through the OPENAI API or other closed-source models. However, for many datasets the version of the closed-source model used in evaluating LLaVA-OneVision in [27] is no longer available. So we use the latest version of GPT-4 for our assessments at the time of publication (gpt-4o-2024-08-06). We also observed that when calling a closed-source model like GPT-4 via an API, the responses are not fully deterministic, even with a temperature set to zero, introducing some noise into the evaluation metrics. To reduce this noise, we exclude all these datasets when testing across different reduction ratios. On the other hand, for Tab. 1, we exclude MMVet, Vibe-Eval, VideoChatGPT, MM-LiveBench, and LLaVA-Wilder as they have high inference times, which would dominate the throughput calculation. + +For certain datasets, such as DocVQA, InfoVQA, and TextVQA, we use the validation split contrary to [27]. This choice allows us to test various reduction ratios and approaches without requiring submission to the test server, which would be impractical for extensive testing. For datasets requiring a test set submission (EgoSchema and PerceptionTest), where either the validation set is typically + +![](images/2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg) +Figure 13. 
Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-1.6-Mistral-7B before the application of rotary embeddings. + +not used for evaluation or does not exist, we report the submission-based metrics evaluated directly on the test set. As explained above, for some datasets our evaluation setup differs from the one used for evaluating LLaVA-OneVision in [27], which may result in variations in the reported results for this model on certain datasets. This is primarily due to the use of validation splits for DocVQA, InfoVQA, and TextVQA, as well as the reliance on GPT-based metrics for some datasets (a common practice for these benchmarks, making alternative evaluation difficult). Nevertheless, our comparisons remain fair, as the same evaluation procedure is consistently applied across all approaches and reduction ratios. + +We note that when using reduction methods, results may include slight variations due to edge cases where distances or importance metrics for different vectors are equal. That's why we report results based on the average of three different runs for each dataset. + +Notably, when testing on Qwen2-VL-7B-Instruct without reduction, some datasets encountered GPU out-of-memory errors (MLVU, VideoMME, and ActivityNet Perception) which we excluded from the test set. Additionally, results on ScienceQA were quite low when tested without reduction (0.132), leading to its exclusion from testing as well. We note that, as we use LMM-Eval [26] for evaluation, results differ for some datasets from the officially reported results, as prompts are sometimes not formatted in the same manner. This observation also applies to InternVL2-8B. + +# I. Additional numerical results + +Table 8 and Tab. 9 show a comparison of DBDPC and various clustering algorithms for a reduction ratio of approximately $60\%$ on LLaVA-OneVision-7B across multiple datasets. 
The results demonstrate that DBDPC outperforms other clustering algorithms in visual token reduction for the + +![](images/1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg) +Figure 14. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of Qwen2-VL-7B-Instruct before the application of rotary embeddings. + +majority of the datasets. Additionally, the tables show that the clustering process for DBDPC is significantly faster than that of other clustering algorithms. Table 10 presents a comparison of EUTI-based visual token pruning and FastV for a reduction ratio of approximately $60\%$ on LLaVA-OneVision-7B across various datasets. The results indicate that EUTI outperforms FastV on most datasets while also being more computationally efficient. Table 15 shows that using keys for distance calculations in DBDPC outperforms hidden states across the majority of the test datasets. Also, we present a comparison between PACT and other visual reduction techniques for InternVL2-8B, and LLaVA-1.6-Mistral-7B across different datasets in Tab. 6, and Tab. 7. + +![](images/4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg) +Figure 15. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of InternVL2-8B before the application of rotary embeddings. + +# J. Ablation study : Additional numerical results + +Table 11 shows a comparison between PACT, DBDPC, and EUTI for a reduction ratio of approximately $70\%$ , applied on LLaVA-OneVision-7B. The results demonstrate that PACT, which combines both clustering and pruning, outperforms the other two methods that are either clustering-based or pruning-based across various datasets. More importantly, DBDPC and EUTI exhibit a significant drop in performance on some of the datasets, which is not the case for PACT. 
We note that numerical results for the ablation studies conducted on DBDPC, EUTI, and PACT can be found in Tab. 12, Tab. 13 and Tab. 14. + +Table 4. Dataset Splits, Subsets, and Evaluation Metrics Used in Our Experiments. Default indicates the use of the standard test split or cases where only one split/subset is available. The evaluation metrics employed are those commonly used for the respective datasets and generally the ones proposed in the official papers. For GPT-based scores (or any model-based scores), this means that a GPT model was used during evaluation, typically to extract answers from the generated output text, which are then matched with the ground truth to calculate accuracy using exact matches. When accuracy is reported, it generally implies that only an exact match is considered a correct answer. + +
DatasetSplitSubsetEvaluation Metric
VideoMMEDefaultNo subtitlesAccuracy
MMEDefaultDefaultMME Perception Score
DocVQAValidationDefaultANLS
MLVUDefaultDefaultAccuracy
LLaVA-InterleaveDefaultOut-domainAccuracy
ChartQAValidationDefaultRelaxed Accuracy
MMBenchValidationEnglishGPT-based Score
MuirBenchDefaultDefaultAccuracy
ScienceQADefaultVision onlyAccuracy
MMMUValidationDefaultAccuracy
AI2DDefaultDefaultAccuracy
InfographicVQAValidationDefaultANLS
MMStarDefaultDefaultAccuracy
ActivityNetQADefaultDefaultGPT-based Score
MM-LiveBenchDefault2406GPT-based Score
LLaVA-WilderDefaultSmallGPT-based Score
MathVerseDefaultVision miniGPT-based Score
MathVistaDefaultTestminiGPT-based Score
MMVetDefaultDefaultGPT-based Score
Vibe-EvalDefaultDefaultREKA-based Score
VideoChatGPTDefaultDefaultGPT-based Score
EgoSchemaDefaultDefaultSubmission
PerceptionTestDefaultMultiple Choice QASubmission
TextVQAValidationDefaultOfficial metric
+ +Table 5. Performance of PACT on LLaVA-OneVision-7B using $d_{c} = 0.17$ and $\alpha = 0.7$ . + +
DatasetPACT (Ours)
MetricRed. RatioProc. TimeAlgo. Time
VideoMME57.769.2%0.3210.021
MME1571.072.1%0.2260.017
DocVQA85.471.1%0.4670.026
MLVU64.869.2%0.3220.022
LLaVA-Interleave62.272.2%0.1330.010
ChartQA77.371.4%0.3090.019
MMBench79.972.0%0.1340.010
MuirBench42.470.9%0.1750.013
ScienceQA93.572.0%0.1300.010
MMMU48.872.6%0.1030.007
AI2D81.272.5%0.1730.013
InfographicVQA61.570.0%0.4030.023
MMStar59.572.3%0.1470.011
ActivityNetQA55.170.0%0.4090.029
MathVerse17.176.0%0.3500.021
MathVista62.173.0%0.2600.015
EgoSchema60.069.1%0.3200.021
PerceptionTest52.370.0%0.3010.023
TextVQA75.569.2%0.3200.023
+ +Table 6. Comparison of PACT with FastV, VTW, and ToME applied on InternVL2-8B on Various Datasets. + +
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
VideoMME52.20.24751.168.4%0.15151.10.15551.00.14250.20.190
MME1621.00.1711591.969.9%0.1211588.70.1181627.00.1111533.30.155
MLVU50.60.43949.768.8%0.32648.80.32549.50.33329.30.343
LLaVA-Interleave40.00.39039.071.2%0.26539.70.26339.60.23036.70.316
MMBench81.90.16180.470.4%0.11880.20.11680.20.10970.80.165
MuirBench35.70.43234.470.3%0.24935.60.25833.70.21032.70.296
ScienceQA97.10.16597.170.8%0.11895.80.11695.70.10989.90.151
MMMU48.50.16748.070.6%0.12647.70.12647.80.11947.50.156
AI2D82.50.14681.470.7%0.11278.50.11079.60.10574.40.142
MMStar59.00.17956.770.4%0.18654.20.18453.40.35255.10.156
PerceptionTest57.70.30056.866.0%0.20356.20.21334.10.19255.20.228
EgoSchema54.00.24053.767.0%0.15553.10.16332.20.14652.90.172
ActivityNet51.70.24051.366.0%0.15351.00.16130.80.14350.40.171
MM-LiveBench68.03.07567.368.0%2.14067.02.24740.42.00366.62.354
+ +Table 7. Comparison of PACT with FastV, Prumerge, and Hired applied on LLaVA-1.6-Mistral-7B across multiple datasets. + +
DatasetNo ReductionPACT (Ours)FastVPrumergeHired
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1500.00.2371507.170.3%0.1591503.90.1581485.40.1661497.00.168
DocVQA70.00.36367.167.1%0.28464.50.28148.80.29365.80.295
ChartQA52.90.33249.370.1%0.25948.90.26136.00.26446.10.266
MMBench68.20.22668.071.9%0.15567.90.15466.20.16067.60.164
ScienceQA73.00.19772.771.5%0.14473.20.14571.70.14872.90.149
MMMU34.20.23934.971.5%0.17134.70.16933.90.18033.90.180
AI2D67.50.23367.570.9%0.16067.00.15864.50.16565.90.166
InfographicVQA36.90.29435.666.2%0.22633.40.22931.90.23631.60.236
MMStar36.20.37536.771.9%0.35036.60.40035.10.34535.90.345
+ +Table 8. Comparison of DBDPC and Agglomerative Clustering Methods for a Reduction Ratio of approximately $60\%$ on LLaVA-OneVision-7B. + +
DatasetDBDPC (ours)Agg. (Single Linkage)Agg. (Average Linkage)Agg. (Complete Linkage)
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.61.5041.14857.01.6571.31657.91.6901.350
MME1563.80.2550.0281554.10.9940.7381559.21.1230.8681563.01.1510.897
DocVQA84.70.5300.04483.61.8991.37984.42.1851.66284.32.3081.777
MLVU64.20.3840.03964.01.5741.22965.21.6751.32964.81.7001.355
LLaVA-Interleave62.10.1510.01662.00.4250.27761.50.4460.29861.40.4460.298
ChartQA76.00.3660.03174.51.1510.79875.81.2530.91075.81.2770.930
MMBench80.10.1510.01679.50.4270.27779.70.4370.29179.80.4490.299
MuirBench43.20.2150.02341.40.6670.47442.00.7270.53442.00.7380.544
ScienceQA94.70.1470.01594.80.3940.25094.70.4160.27194.70.4130.269
MMMU48.30.1100.00948.40.2180.11049.30.2320.12148.20.2250.117
AI2D80.70.2020.02280.80.6670.47280.60.7480.55180.10.7530.557
InfographicVQA61.60.5280.04657.11.6081.18159.81.8181.39459.81.8701.436
MMStar60.50.1670.01860.20.5070.34459.80.5560.39060.50.5600.395
+ +Table 9. Comparison of DBDPC, DBSCAN, DPC, and KMeans Clustering Methods for a Reduction Ratio of approximately $60\%$ on LLaVA-OneVision-7B. + +
DatasetDBDPC (ours)DBSCANDPCKMeans
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.40.3940.04656.90.7290.39257.31.7251.383
MME1563.80.2550.0281560.30.2740.0361549.90.6370.3801549.91.2540.999
DocVQA84.70.5300.04484.20.5330.04483.00.9500.44279.62.0591.544
MLVU64.20.3840.03964.20.3910.04864.20.7270.38264.61.7251.377
LLaVA-Interleave62.10.1510.01660.40.1590.02663.90.2580.12162.30.7110.566
ChartQA76.00.3660.03175.20.3690.03475.20.7580.41574.21.3991.059
MMBench80.10.1510.01678.10.1530.02079.50.3260.17979.90.7020.552
MuirBench43.20.2150.02342.40.2190.02842.00.4660.27342.90.9550.763
ScienceQA94.70.1470.01591.20.1500.02494.30.2510.11793.40.6610.518
MMMU48.30.1100.00947.80.1300.03048.30.1870.07848.20.5000.391
AI2D80.70.2020.02279.20.2020.02280.30.4550.26481.11.0620.860
InfographicVQA61.60.5280.04654.00.5310.05256.60.9750.54757.81.7801.357
MMStar60.50.1670.01856.60.1790.02860.60.3760.21360.20.8280.661
+ +Table 10. Comparison of EUTI-based visual tokens pruning and FastV for a Reduction Ratio of approximately $60\%$ on LLaVA-OneVision-7B. + +
DatasetEUTI (Ours)FastV
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME58.40.3510.00557.60.3810.040
MME1560.00.2560.0041570.70.2830.025
DocVQA86.50.5210.00585.30.5590.032
MLVU64.30.3550.00463.10.3910.040
LLaVA-Interleave58.90.1400.00359.70.1520.007
ChartQA78.60.3440.00478.00.3630.016
MMBench80.20.1420.00379.20.1510.005
MuirBench40.00.1910.00340.80.2040.009
ScienceQA93.60.1370.00392.30.1490.006
MMMU48.80.1010.00247.30.1100.003
AI2D81.10.1910.00380.30.2020.009
InfographicVQA63.00.4250.00560.30.4730.040
MMStar59.60.1590.00359.60.1700.007
+ +Table 11. Comparison of PACT with Standalone Methods: EUTI-based Visual Token Pruning and DBDPC Clustering Algorithm for a Reduction Ratio of approximately $70\%$ , applied on LLaVA-OneVision-7B. + +
DatasetPACTDBDPCEUTI
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.50.3210.02157.30.3420.04058.40.3050.005
MME1558.70.2260.0171543.70.2430.0281595.90.2130.004
DocVQA84.30.4670.02682.50.5000.04485.30.4560.005
MLVU64.60.3220.02263.90.3580.03964.40.2910.004
LLaVA-Interleave63.90.1330.01062.60.1490.01657.10.1270.003
ChartQA77.20.3110.01975.10.3330.03178.20.2920.004
MMBench80.20.1340.01079.70.1470.01679.60.1280.003
MuirBench42.80.1750.01343.20.2110.02339.90.1640.003
ScienceQA93.60.1300.01093.80.1420.01592.20.1230.003
MMMU48.90.1030.00747.20.1090.00948.90.0960.002
AI2D80.60.1730.01380.50.1910.02279.90.1640.003
InfographicVQA61.90.4030.02358.80.4650.04660.40.3600.005
MMStar59.50.1470.01159.50.1630.01859.20.1400.003
+ +Table 12. Ablation Studies on DBDPC-based visual token reduction for a Reduction Ratio of approximately $60\%$ on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches. When ablating the Center Position-IDs assignment, we reorder the hidden states based on the mean of the Position-IDs of the elements in each cluster and then assign position IDs sequentially. + +
DBDPCw/o Center Position-IDs assignmentw/o Proportional Attentionw/o Merging
VideoMME57.458.057.957.5
MME1563.81539.31523.81476.9
DocVQA84.728.284.283.1
MLVU64.261.263.963.5
LLaVA-Interleave62.169.663.263.6
ChartQA76.024.876.074.4
MMBench80.176.180.179.6
MuirBench43.226.543.244.0
ScienceQA94.767.494.293.6
MMMU48.334.547.648.2
AI2D80.743.080.479.9
InfographicVQA61.617.859.858.7
MMStar60.558.959.659.1
+ +Table 13. Ablation Studies on the EUTI-based Visual Token Pruning for a Reduction Ratio of approximately $70\%$ , applied on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches. + +
DatasetEUTIEUTI w/o NormNorm (EUTI w/o Global Query)
VideoMME58.457.656.6
MME1595.91573.41576.5
DocVQA85.385.179.7
MLVU64.363.063.1
LLaVA-Interleave57.157.952.9
ChartQA78.276.476.7
MMBench79.679.479.4
MuirBench40.040.539.6
ScienceQA92.291.893.5
MMMU48.949.349.2
AI2D79.979.979.7
InfographicVQA60.460.149.3
MMStar59.257.459.2
+ +Table 14. Ablation Study on Pruned Tokens Recovery for a Reduction Ratio of approximately $70\%$ . We remove the token recovery step, which is equivalent to Setting $\alpha$ to Zero. We report only the metrics, as processing time is similar across both approaches. + +
DatasetPACTPACT w/o Pruned-Token Recovery
VideoMME57.657.4
MME1556.71576.3
DocVQA84.384.3
MLVU64.664.2
LLaVA-Interleave63.959.6
ChartQA76.476.4
MMBench79.979.8
MuirBench42.842.2
ScienceQA93.393.6
MMMU48.548.5
AI2D80.680.6
InfographicVQA61.961.3
MMStar75.174.9
+ +Table 15. Ablation Study on Keys Utilization in DBDPC for a Reduction Ratio of approximately $60\%$ . Metrics are reported, as processing time is similar across both configurations. + +
DatasetDBDPCDBDPC w/o Keys
VideoMME57.4057.22
MME1563.801526.18
DocVQA84.7080.50
MLVU64.2064.60
LLaVA-Interleave62.1060.80
ChartQA76.0068.80
MMBench80.1079.21
MuirBench43.2041.40
ScienceQA94.7091.90
MMMU48.3047.90
AI2D80.7079.10
InfographicVQA61.656.70
MMStar60.5058.40
\ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08966/images/034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg b/data/2025/2504_08xxx/2504.08966/images/034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d82b7c187fffd80891db79ecec23b94e37350e3 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a489fbe9058c862b3975a7c9447c3e0bd546f95ca189193a4a3373e2406b87e0 +size 22647 diff --git a/data/2025/2504_08xxx/2504.08966/images/080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg b/data/2025/2504_08xxx/2504.08966/images/080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5938b07175b24d6c6253c41fe07d8ffe8fff128 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa0182fa300875d1accb810fd2e58c15d5e3a3cb6a506f8cebf8633fc7bd435d +size 11301 diff --git a/data/2025/2504_08xxx/2504.08966/images/15271f48c5f424341279adde269dbd210f51db8fd8ea993e5b4552d4548344f8.jpg b/data/2025/2504_08xxx/2504.08966/images/15271f48c5f424341279adde269dbd210f51db8fd8ea993e5b4552d4548344f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3abef1947d8388a35cf65a530cab178c5357317 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/15271f48c5f424341279adde269dbd210f51db8fd8ea993e5b4552d4548344f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff64e54445eb1e6a5f3a7c32eb1c605cf85b7c0175ad2bd2040db3f805c63fdc +size 50779 diff --git 
a/data/2025/2504_08xxx/2504.08966/images/1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg b/data/2025/2504_08xxx/2504.08966/images/1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4fe27cd29a1d5d6b5a21eaf37f9f401891014d73 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec5880d6d100ec0f68dca20582f313c08136c8537d8e274ab8efd61ece86d21 +size 27769 diff --git a/data/2025/2504_08xxx/2504.08966/images/1d2035a61913e11271265d7f4b0ef9642686ee68ebb89cf85ac6730e150f5f63.jpg b/data/2025/2504_08xxx/2504.08966/images/1d2035a61913e11271265d7f4b0ef9642686ee68ebb89cf85ac6730e150f5f63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..333ba298404d8d7d70c1de9f44c3aae78924befb --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/1d2035a61913e11271265d7f4b0ef9642686ee68ebb89cf85ac6730e150f5f63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abb6c0ebfde1728dc081c13f743870eba046eac5ef29b2479d0b6d4853b5256d +size 5620 diff --git a/data/2025/2504_08xxx/2504.08966/images/1e4b5a29c5c158bc8faf221aaeebaba29da94b0f519896f6bd7fbc3df2a52aee.jpg b/data/2025/2504_08xxx/2504.08966/images/1e4b5a29c5c158bc8faf221aaeebaba29da94b0f519896f6bd7fbc3df2a52aee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73754b94ea86256b11c39b1e76565afce4eec436 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/1e4b5a29c5c158bc8faf221aaeebaba29da94b0f519896f6bd7fbc3df2a52aee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5c54ec347556a7bb51bde82b2a7293188169fff26cde135fd103d66343d9e65 +size 81588 diff --git a/data/2025/2504_08xxx/2504.08966/images/1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg 
b/data/2025/2504_08xxx/2504.08966/images/1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c2f7e3a3c3db1571a0a9fac628a2eb6fda82f79 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c47b85852f784ce1a1b4edcb3c70a335994f6e112837dca0eb089bb5374dc8e +size 14238 diff --git a/data/2025/2504_08xxx/2504.08966/images/1fde0b23cdce925158bf499bc98398d7386c9c58aae28864e8b31573af2b44c1.jpg b/data/2025/2504_08xxx/2504.08966/images/1fde0b23cdce925158bf499bc98398d7386c9c58aae28864e8b31573af2b44c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..55982bb50549d79a6a1b3e7a922e619383831d92 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/1fde0b23cdce925158bf499bc98398d7386c9c58aae28864e8b31573af2b44c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70a9b52196bff8bfbd6d0e7418dc0f21b2498db3dea706857d54d3f73834a0dd +size 4954 diff --git a/data/2025/2504_08xxx/2504.08966/images/2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg b/data/2025/2504_08xxx/2504.08966/images/2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0cb2b91fdde20113374c520f3911b6c55f1eb004 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cc4e0c4cca18c3581138ae48ad918e76522b91b368f2163e6b37a8f9a4e88a8 +size 27280 diff --git a/data/2025/2504_08xxx/2504.08966/images/20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg b/data/2025/2504_08xxx/2504.08966/images/20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0520caf44d436c53790c8c3a24ea0ef6df0c27d1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9674b821de2f854aa50c184a5f3573bfc0dbf1dfbaf86064e0a44e266466757 +size 7379 diff --git a/data/2025/2504_08xxx/2504.08966/images/244bf629c5d74d41ab4b46ad2bbd1f03b64ae38d10496011c5e3fdb93b71607c.jpg b/data/2025/2504_08xxx/2504.08966/images/244bf629c5d74d41ab4b46ad2bbd1f03b64ae38d10496011c5e3fdb93b71607c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45ace12cb9e7d7f518e388a507226c966a7e4f1a --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/244bf629c5d74d41ab4b46ad2bbd1f03b64ae38d10496011c5e3fdb93b71607c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2afa4bc3c01b220d0cbcc47cc170e9f36aede2a914690a79c550c467019489fc +size 4996 diff --git a/data/2025/2504_08xxx/2504.08966/images/2582c11dad2873efdcd2901d7fd2103819051dbbc8f8385673979369d9b7138d.jpg b/data/2025/2504_08xxx/2504.08966/images/2582c11dad2873efdcd2901d7fd2103819051dbbc8f8385673979369d9b7138d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e223ff3964c7abb9f18cc4507a809ffc58eee0d --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/2582c11dad2873efdcd2901d7fd2103819051dbbc8f8385673979369d9b7138d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c861033e5f06c57ff922224b0f9aa08673aaa0cf7d60fb47e5aae41776e33095 +size 4657 diff --git a/data/2025/2504_08xxx/2504.08966/images/29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg b/data/2025/2504_08xxx/2504.08966/images/29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16c774a4b563f52a57c22317fc5a41d89b7cdc86 --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08966/images/29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2651688401b2d867caa9889c97e71c276e4fe7ab69129dbf05ae3445c9367208 +size 8491 diff --git a/data/2025/2504_08xxx/2504.08966/images/2da5772f2eeadcbe64d4a7c934552b43391ca5494d5f1c72760427bf69ecdd85.jpg b/data/2025/2504_08xxx/2504.08966/images/2da5772f2eeadcbe64d4a7c934552b43391ca5494d5f1c72760427bf69ecdd85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0e3a3bfd897dc6030ffddafbf6f2a74e4db26b6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/2da5772f2eeadcbe64d4a7c934552b43391ca5494d5f1c72760427bf69ecdd85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e64b65deee4c0c2445062193482e5eae90dbaadee522db20c9712d612de38be4 +size 107704 diff --git a/data/2025/2504_08xxx/2504.08966/images/3b64ec81b0569105c87ff0f63a744fa67f3569c653b3a8ad27f5bdebd3079d89.jpg b/data/2025/2504_08xxx/2504.08966/images/3b64ec81b0569105c87ff0f63a744fa67f3569c653b3a8ad27f5bdebd3079d89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f4d0be4a8f39460610fda1984d150c334f5280d --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/3b64ec81b0569105c87ff0f63a744fa67f3569c653b3a8ad27f5bdebd3079d89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3842f848849896c943321a884940a4aecfe3f2575b1f2f58e66655f33f6a7bd9 +size 7338 diff --git a/data/2025/2504_08xxx/2504.08966/images/4201dae317b57c360062832ecc40a760bf294c48a9a5ff0abb50402cd4d541d6.jpg b/data/2025/2504_08xxx/2504.08966/images/4201dae317b57c360062832ecc40a760bf294c48a9a5ff0abb50402cd4d541d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4a74d63b626da82a1b053733a0b12f2b907d802 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/4201dae317b57c360062832ecc40a760bf294c48a9a5ff0abb50402cd4d541d6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e8b9f29853391e013ee513d8482da5b66dfac6fb9dc5ad17b5e0892f076ee568 +size 7630 diff --git a/data/2025/2504_08xxx/2504.08966/images/45e47e889cc760e7fb27c04188b2e788abf6954758da375246dc922a85502bc6.jpg b/data/2025/2504_08xxx/2504.08966/images/45e47e889cc760e7fb27c04188b2e788abf6954758da375246dc922a85502bc6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f5b04cdb78598901c7fc4fec29efd8050f26ad4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/45e47e889cc760e7fb27c04188b2e788abf6954758da375246dc922a85502bc6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fed495a3be041a3bd37c391a96c5c325f468e0e984711ed119d30228e7fa7d8 +size 4480 diff --git a/data/2025/2504_08xxx/2504.08966/images/472e9c158d3825557b8be34db4c4d41ffe133f00886c06e9eb954a9bb53e45d5.jpg b/data/2025/2504_08xxx/2504.08966/images/472e9c158d3825557b8be34db4c4d41ffe133f00886c06e9eb954a9bb53e45d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e229b91fc4a97e1141cd03186481d3c46da11b0e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/472e9c158d3825557b8be34db4c4d41ffe133f00886c06e9eb954a9bb53e45d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce92b1ce42106a70ec4726e0c383715491979b1f98b9457d3ac96181d43b6e78 +size 2928 diff --git a/data/2025/2504_08xxx/2504.08966/images/47d6de9bf39a12c1ede01a2d5400e2a30ae9211ca9514afd9801eed89d29348e.jpg b/data/2025/2504_08xxx/2504.08966/images/47d6de9bf39a12c1ede01a2d5400e2a30ae9211ca9514afd9801eed89d29348e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba9db2e6ad5398745b7eec2a020b6e284c916bec --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/47d6de9bf39a12c1ede01a2d5400e2a30ae9211ca9514afd9801eed89d29348e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:929722b8eafa5b12dda8b47b9988c1075385c4c499cf2d3d179dd1d47e00ffbb +size 117713 diff --git 
a/data/2025/2504_08xxx/2504.08966/images/49cba0de526e2f3b78888f22f65f36bd56c061de82eb1176d616290f4294a924.jpg b/data/2025/2504_08xxx/2504.08966/images/49cba0de526e2f3b78888f22f65f36bd56c061de82eb1176d616290f4294a924.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccb300f06cbdeff62ce9e0312187782092ce51ad --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/49cba0de526e2f3b78888f22f65f36bd56c061de82eb1176d616290f4294a924.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:330d08d9e40fcb5ab3bda32a9ebf7a256afbb7ebc6b0946c096c15d78f00a8ea +size 2286 diff --git a/data/2025/2504_08xxx/2504.08966/images/4be4dc214e5ed43b8328e96d9a6f8783868e8f810f442012991dcd739685a581.jpg b/data/2025/2504_08xxx/2504.08966/images/4be4dc214e5ed43b8328e96d9a6f8783868e8f810f442012991dcd739685a581.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8d90d7a9517f1be56e1b7a388d344c4e8e4fec4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/4be4dc214e5ed43b8328e96d9a6f8783868e8f810f442012991dcd739685a581.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb8bd07843b518ed59820c87e5801a1874db954d0dc546e08f8fdec5092c2e0f +size 4936 diff --git a/data/2025/2504_08xxx/2504.08966/images/4e49980de2d30f2eec82773b96f3c29fc251423c36f5807c46fec1a9888d0ae2.jpg b/data/2025/2504_08xxx/2504.08966/images/4e49980de2d30f2eec82773b96f3c29fc251423c36f5807c46fec1a9888d0ae2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1865bc979d0a1660fa33849b3f0fcae12fe49a68 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/4e49980de2d30f2eec82773b96f3c29fc251423c36f5807c46fec1a9888d0ae2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1bf0631b12c3472c13c3be20ad2fbd670a5e8d3f27fc724037986600fb36d39 +size 3267 diff --git a/data/2025/2504_08xxx/2504.08966/images/4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg 
b/data/2025/2504_08xxx/2504.08966/images/4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfff3ea9f9f3b2a8b15ba09f9502f4adbf77ed7b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:087feca6b3ed08c6e27c63c4957b79341da36da47166e2a88bc59b5b10c48125 +size 28985 diff --git a/data/2025/2504_08xxx/2504.08966/images/4fddf2bbe561b78a54a7f7ffc2b2961bf3a801236cc2b45eb20735bc0616777f.jpg b/data/2025/2504_08xxx/2504.08966/images/4fddf2bbe561b78a54a7f7ffc2b2961bf3a801236cc2b45eb20735bc0616777f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb5f55b83f50605c536e82e5f6be68e0f6c8d778 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/4fddf2bbe561b78a54a7f7ffc2b2961bf3a801236cc2b45eb20735bc0616777f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9051afc71a1e18d6fe40a8f243cd552fe1320fc38d42671f3dfe020ec94905f9 +size 142273 diff --git a/data/2025/2504_08xxx/2504.08966/images/53c029aac0c8feed6528662a19f0a98c00dc5cad77900b4645d2e4b3090fe95c.jpg b/data/2025/2504_08xxx/2504.08966/images/53c029aac0c8feed6528662a19f0a98c00dc5cad77900b4645d2e4b3090fe95c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1716da1b84ecd75da66705ca61ee5bfa45d7d50 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/53c029aac0c8feed6528662a19f0a98c00dc5cad77900b4645d2e4b3090fe95c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb51f1ae635dab75fd18462e5e7923001d24d1f7de793a1ce9ea8c25d02ca7fd +size 97169 diff --git a/data/2025/2504_08xxx/2504.08966/images/54e72effdf75ab7343329b8a3ac28f916953ad39803d2db1333e86fcd61af826.jpg b/data/2025/2504_08xxx/2504.08966/images/54e72effdf75ab7343329b8a3ac28f916953ad39803d2db1333e86fcd61af826.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..010e12d9f630bf48fc2d490eb1c5e0f2107328b0 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/54e72effdf75ab7343329b8a3ac28f916953ad39803d2db1333e86fcd61af826.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a0faf8059695912bed8fb577d043d4f232467950668d8edc9ad521773a3360 +size 3603 diff --git a/data/2025/2504_08xxx/2504.08966/images/5d0471b07592f17764f244fb83f43851f0af23eb77d5e45eb7c28eaf67db9494.jpg b/data/2025/2504_08xxx/2504.08966/images/5d0471b07592f17764f244fb83f43851f0af23eb77d5e45eb7c28eaf67db9494.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37d5183a0191d408f01bfc27e799d15e3992f44b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/5d0471b07592f17764f244fb83f43851f0af23eb77d5e45eb7c28eaf67db9494.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6451d851b1ae36eff3e4d53253f090d8424dda1af945a906a83f77e8754a883e +size 13550 diff --git a/data/2025/2504_08xxx/2504.08966/images/6207b2470797d0bac1ce46dc1f9930993924f2d61f1e9cfaef5a5fd6155f806f.jpg b/data/2025/2504_08xxx/2504.08966/images/6207b2470797d0bac1ce46dc1f9930993924f2d61f1e9cfaef5a5fd6155f806f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5da47557c7dbd85dfe2dc6be9dda8f4b6e3e543 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/6207b2470797d0bac1ce46dc1f9930993924f2d61f1e9cfaef5a5fd6155f806f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa9f25f336e046b3e7c7ac3b2f34db34a675fb59a7a7935b1899d2fa4cb0e0b +size 4199 diff --git a/data/2025/2504_08xxx/2504.08966/images/66838683c6d4d826816b766acbd7ffa79eb6b730ce42f44926fc5a77a836db69.jpg b/data/2025/2504_08xxx/2504.08966/images/66838683c6d4d826816b766acbd7ffa79eb6b730ce42f44926fc5a77a836db69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a36fe1f1c5093a37ad9f60468914e94af38e553b --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08966/images/66838683c6d4d826816b766acbd7ffa79eb6b730ce42f44926fc5a77a836db69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5c98a17bdd446762771fb916c9263faa1e91c064cfbe4d25f7857640c9435ff +size 23126 diff --git a/data/2025/2504_08xxx/2504.08966/images/6781bd4dacc8d2c302580b8fb7ca94d0f5aaeb7217ecb00a2d70b937c7b1cd3b.jpg b/data/2025/2504_08xxx/2504.08966/images/6781bd4dacc8d2c302580b8fb7ca94d0f5aaeb7217ecb00a2d70b937c7b1cd3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c47bf99b7320ebf9b368faf6f82d6211fba723ad --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/6781bd4dacc8d2c302580b8fb7ca94d0f5aaeb7217ecb00a2d70b937c7b1cd3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39043e58745cac74ba4a703cbeb73cf14e99257ccc07df5ef9a748fa4c46500e +size 163302 diff --git a/data/2025/2504_08xxx/2504.08966/images/69925927f3fbdaadf1cc8095404cb35f5bc2f8688dd8a47c3953fd4d40bf1d41.jpg b/data/2025/2504_08xxx/2504.08966/images/69925927f3fbdaadf1cc8095404cb35f5bc2f8688dd8a47c3953fd4d40bf1d41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f1770f99b2453e22899ec442db24da8d78975e0 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/69925927f3fbdaadf1cc8095404cb35f5bc2f8688dd8a47c3953fd4d40bf1d41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2ad484fdd6b9f88d351a1a1942626247ad4313b356578e321370c2a0fa4bf18 +size 9011 diff --git a/data/2025/2504_08xxx/2504.08966/images/7b4dfda90299802b9ec3d227d27b3e6f0613a456c69c577ec4c432de34d926a1.jpg b/data/2025/2504_08xxx/2504.08966/images/7b4dfda90299802b9ec3d227d27b3e6f0613a456c69c577ec4c432de34d926a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cadf50c588290099b3102c698ade570ab79b72fb --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7b4dfda90299802b9ec3d227d27b3e6f0613a456c69c577ec4c432de34d926a1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9c3c503e6bf177ddc45a27f01d2f90934e15c1c2268e43de31777d9a93d6a7cd +size 5939 diff --git a/data/2025/2504_08xxx/2504.08966/images/7c034be835ae6416aad9e6ae87aa0a942f2b6dec0befc8b2ea5c6961e0d1085d.jpg b/data/2025/2504_08xxx/2504.08966/images/7c034be835ae6416aad9e6ae87aa0a942f2b6dec0befc8b2ea5c6961e0d1085d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc2e1104468ecc236c25d90283d6e46f3950c1df --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7c034be835ae6416aad9e6ae87aa0a942f2b6dec0befc8b2ea5c6961e0d1085d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:606fa336c6d737fca767999b8ada9fa5e421904fdfc8eeffb4daf10aae91a182 +size 107771 diff --git a/data/2025/2504_08xxx/2504.08966/images/7c35b30d6fa0b6f7bac48158d8e6bf94a4fbad368daa8af814b0768af4faa148.jpg b/data/2025/2504_08xxx/2504.08966/images/7c35b30d6fa0b6f7bac48158d8e6bf94a4fbad368daa8af814b0768af4faa148.jpg new file mode 100644 index 0000000000000000000000000000000000000000..29b684d065bc0195f913ad8341e3c97c3efbdfc9 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7c35b30d6fa0b6f7bac48158d8e6bf94a4fbad368daa8af814b0768af4faa148.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54d0f62ec35d9bffcf0ddf9a4e2534862a2c26e98bd0be58f77bc457257b7161 +size 62927 diff --git a/data/2025/2504_08xxx/2504.08966/images/7ce80ee93a2ab17492c24a1274c249473d3b46d1c880f0d1a9e7ef8feed27c38.jpg b/data/2025/2504_08xxx/2504.08966/images/7ce80ee93a2ab17492c24a1274c249473d3b46d1c880f0d1a9e7ef8feed27c38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e46722590a269f3701b9adf150db3803f66e857 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7ce80ee93a2ab17492c24a1274c249473d3b46d1c880f0d1a9e7ef8feed27c38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5910df24c9a0a4301a461b4b0ee40d07a2bcceb1613a5e665d66fee6eea5a884 +size 2980 diff --git 
a/data/2025/2504_08xxx/2504.08966/images/7d8540bfc1f13e20032a59f4e2f73397b14276eca2d36cc26cb8541cdde345ac.jpg b/data/2025/2504_08xxx/2504.08966/images/7d8540bfc1f13e20032a59f4e2f73397b14276eca2d36cc26cb8541cdde345ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3161d10b8f4f7a8e6ed6af8634e415ce7d4138ff --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7d8540bfc1f13e20032a59f4e2f73397b14276eca2d36cc26cb8541cdde345ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7652c3c2e43c3646b6a8bd955fdafe9957f3d7e0ca86918bbf9ed59e0d8aba07 +size 49780 diff --git a/data/2025/2504_08xxx/2504.08966/images/7f5b665c174e3e701e2cc28e0e974693248fd20f6c2143f751273dc85bd088c1.jpg b/data/2025/2504_08xxx/2504.08966/images/7f5b665c174e3e701e2cc28e0e974693248fd20f6c2143f751273dc85bd088c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be3a704f827d88926d56399aa827b497c2da094b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/7f5b665c174e3e701e2cc28e0e974693248fd20f6c2143f751273dc85bd088c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:250e7ddd324271eb5786f420ad3abafc81806aa4c38c74c9f496ab8a64a47726 +size 6516 diff --git a/data/2025/2504_08xxx/2504.08966/images/892add56fdf618d70b81707cdbf73b8a737ebba71017e7b8dc70e2a3d7ae4917.jpg b/data/2025/2504_08xxx/2504.08966/images/892add56fdf618d70b81707cdbf73b8a737ebba71017e7b8dc70e2a3d7ae4917.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48ad57e9a8a596bc3cc5c2222a043ef9ff0d6c94 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/892add56fdf618d70b81707cdbf73b8a737ebba71017e7b8dc70e2a3d7ae4917.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68ddb419d094f86802912cd9dca9ef073ceb9b241a330b2351a02458bb3d4890 +size 5503 diff --git a/data/2025/2504_08xxx/2504.08966/images/8cef1152cfc8f46979a833a4e4fbf5773ece6fb604250f4f1f1cd8d86b2b607f.jpg 
b/data/2025/2504_08xxx/2504.08966/images/8cef1152cfc8f46979a833a4e4fbf5773ece6fb604250f4f1f1cd8d86b2b607f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..74b06a7601b4b8236ab897f596f823f66ab3ffce --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/8cef1152cfc8f46979a833a4e4fbf5773ece6fb604250f4f1f1cd8d86b2b607f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93c9d2f261f77ceefbf25738531d25380bc5e67db3c5a3be960cc5f7adb7d37f +size 16732 diff --git a/data/2025/2504_08xxx/2504.08966/images/922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg b/data/2025/2504_08xxx/2504.08966/images/922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0effecd22248c4b240dfb42df16a61043c0b749f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d9a66d78c21c7b071de9e4b794aea4d8e9db2f7844e273de9fc9630dbfdf6ea +size 6899 diff --git a/data/2025/2504_08xxx/2504.08966/images/935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg b/data/2025/2504_08xxx/2504.08966/images/935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50252ccaa4f4252d3cd55d688592600da377d811 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f284778f0ceafbacfb280e67fb0cd0cc4570e8c2a1295cf70ddbda177770389 +size 23358 diff --git a/data/2025/2504_08xxx/2504.08966/images/991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg b/data/2025/2504_08xxx/2504.08966/images/991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9db145a9c26e2a124cfae24f5bfcab6d1fa5b618 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e7b57a42593c38dbfc133315117bec50d06d1bfc45f97aafd8afc1c10c5ca8 +size 13200 diff --git a/data/2025/2504_08xxx/2504.08966/images/a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg b/data/2025/2504_08xxx/2504.08966/images/a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5686f0d7ca6c0f417cd15b05ebb856119546531 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42c59b8651f3ab33c6db65369fc834451a6e9f2b2ebc2a460ed5078f79ac11a8 +size 11490 diff --git a/data/2025/2504_08xxx/2504.08966/images/a811ec0995145ac4a8f871a4191fb5c4059c7f1ed7e6c0aa2b6dbe01c9915d01.jpg b/data/2025/2504_08xxx/2504.08966/images/a811ec0995145ac4a8f871a4191fb5c4059c7f1ed7e6c0aa2b6dbe01c9915d01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9897e28e12e559453c4b9c036872ede27ba10544 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/a811ec0995145ac4a8f871a4191fb5c4059c7f1ed7e6c0aa2b6dbe01c9915d01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5db2a72c445b842e878e3fbaadd89c991bc55e6ba87cafc182d01d9032265b75 +size 4887 diff --git a/data/2025/2504_08xxx/2504.08966/images/a8edc535de3dc0f5b92d00e7c80d2795654c78738c1f24f8ea1e73fdddad7eb1.jpg b/data/2025/2504_08xxx/2504.08966/images/a8edc535de3dc0f5b92d00e7c80d2795654c78738c1f24f8ea1e73fdddad7eb1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..914b75d032548851464b0b059b283041a4ddb774 --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08966/images/a8edc535de3dc0f5b92d00e7c80d2795654c78738c1f24f8ea1e73fdddad7eb1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1952449adee9089018a5fe31a8f47d17eeb489e9144a98df758ce4a8d5fdb377 +size 4055 diff --git a/data/2025/2504_08xxx/2504.08966/images/ac03a20585c83df2d07c8acc08addbb8f9c595884eeff0f8891abb84395e3e2b.jpg b/data/2025/2504_08xxx/2504.08966/images/ac03a20585c83df2d07c8acc08addbb8f9c595884eeff0f8891abb84395e3e2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7417131d5b8473915a5809b22b5308f983e3112e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/ac03a20585c83df2d07c8acc08addbb8f9c595884eeff0f8891abb84395e3e2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4897b69ef734d020ea211bd1f519c15d25ad9aea5781100ac7cb6a4611ff76a0 +size 134273 diff --git a/data/2025/2504_08xxx/2504.08966/images/b373501466d7e91d7e686c23bd0ab34b12c97e4ccb856350d0e6b824e1c308d9.jpg b/data/2025/2504_08xxx/2504.08966/images/b373501466d7e91d7e686c23bd0ab34b12c97e4ccb856350d0e6b824e1c308d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..019a78b0b3da8f36349004dafb8092a9628aa1f5 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/b373501466d7e91d7e686c23bd0ab34b12c97e4ccb856350d0e6b824e1c308d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aecaa1f9e3d8e25b407f1a4e956ddc9794dbcf3fc919a76e84e0ded7b2c17342 +size 104988 diff --git a/data/2025/2504_08xxx/2504.08966/images/b85f536cc482df694edec26fea498cd1df299190cf870327598c2a9ca5a099c6.jpg b/data/2025/2504_08xxx/2504.08966/images/b85f536cc482df694edec26fea498cd1df299190cf870327598c2a9ca5a099c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..014f84f6fcf6142f11f87620cb2200820bc877e8 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/b85f536cc482df694edec26fea498cd1df299190cf870327598c2a9ca5a099c6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2aee2f4fa3ce24adad1dae64dfac403682e4b86713e2017773de6a355c7a1207 +size 8248 diff --git a/data/2025/2504_08xxx/2504.08966/images/bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg b/data/2025/2504_08xxx/2504.08966/images/bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83373fa39fe968cb0d84ad7677387398c0860793 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d44c45bb1e6ee5476b87833eae196454d55b63be0089b6c4355320579f24609 +size 24369 diff --git a/data/2025/2504_08xxx/2504.08966/images/bbee2722cb3ddcd6e6492ed26fea8d5f65ff30ed2c01704da099312ba5eed042.jpg b/data/2025/2504_08xxx/2504.08966/images/bbee2722cb3ddcd6e6492ed26fea8d5f65ff30ed2c01704da099312ba5eed042.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac3418f3f8b3354bc743fc7649b66f761b3ca613 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bbee2722cb3ddcd6e6492ed26fea8d5f65ff30ed2c01704da099312ba5eed042.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4369998ec2df8edb199e9f15b98fa16e9e44013bb3faa82c83e6e26f0961923 +size 15300 diff --git a/data/2025/2504_08xxx/2504.08966/images/bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg b/data/2025/2504_08xxx/2504.08966/images/bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69b3682caa6275dcfb34c4e258a7d333f2e48277 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6498913088b5f340cb211bf5cad201dc203456b180fc6f98b559126956e8c9f9 +size 7444 diff --git 
a/data/2025/2504_08xxx/2504.08966/images/bda2d3a72a62cb29151eb7947ad887341e88ce3a0c5f7810b01f2d4cc88cd2e6.jpg b/data/2025/2504_08xxx/2504.08966/images/bda2d3a72a62cb29151eb7947ad887341e88ce3a0c5f7810b01f2d4cc88cd2e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d230a76c69bdfa07f4a8a197d2c52a52b9ac1070 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bda2d3a72a62cb29151eb7947ad887341e88ce3a0c5f7810b01f2d4cc88cd2e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:756a7137f56139196a985d13a0185f37ab289b8205255296ef72450047ccd5ab +size 3064 diff --git a/data/2025/2504_08xxx/2504.08966/images/bdb0a995ec5a4a7c8dbfa738af92a7dacabf794778b4bab1d63f6e08e0d172f5.jpg b/data/2025/2504_08xxx/2504.08966/images/bdb0a995ec5a4a7c8dbfa738af92a7dacabf794778b4bab1d63f6e08e0d172f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9696b72d6f21e9c25df364fa139c3ecee355b54 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bdb0a995ec5a4a7c8dbfa738af92a7dacabf794778b4bab1d63f6e08e0d172f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3776f1fbc953a272fb51a682bfc136bfddc04651712e2399b2ffe2d34ce868c5 +size 6202 diff --git a/data/2025/2504_08xxx/2504.08966/images/bf312f87129bcef4dc7f9c5c4aa5297fd4507bf413c59934064a9d43700ba3bf.jpg b/data/2025/2504_08xxx/2504.08966/images/bf312f87129bcef4dc7f9c5c4aa5297fd4507bf413c59934064a9d43700ba3bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff76e5f1c5353ddee7c7d2eab3415cd2eb676a9e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/bf312f87129bcef4dc7f9c5c4aa5297fd4507bf413c59934064a9d43700ba3bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1895c8b38bee5c2873a0d3ea8beb6c4f9b195e0c6ad94bcc2027ecf5af8f17d8 +size 2860 diff --git a/data/2025/2504_08xxx/2504.08966/images/c02e3429e73e82093f3754d16330140b4903f4ca16ac239e9651f581911fbed5.jpg 
b/data/2025/2504_08xxx/2504.08966/images/c02e3429e73e82093f3754d16330140b4903f4ca16ac239e9651f581911fbed5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d793757c5b3dc2a594632c443d0e49311cb93f1 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/c02e3429e73e82093f3754d16330140b4903f4ca16ac239e9651f581911fbed5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fdc47b8b7685253826d36219dd030145359d9020873fb42e991e2fc38b46cd2 +size 3166 diff --git a/data/2025/2504_08xxx/2504.08966/images/c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg b/data/2025/2504_08xxx/2504.08966/images/c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a66d62cdb27e9a570e9e97331b2e609130977a16 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc538670ee6d65c2e88645d9e063aab8c9d2fbf5c1bd3580a6e2caf03ae45702 +size 60868 diff --git a/data/2025/2504_08xxx/2504.08966/images/c71d96b011c757c352e6f8eebc1d66a311b0ef3c469187a7a8d88c82973eb2d9.jpg b/data/2025/2504_08xxx/2504.08966/images/c71d96b011c757c352e6f8eebc1d66a311b0ef3c469187a7a8d88c82973eb2d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d06428da383831438a884874d89bdf7ab8d49e7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/c71d96b011c757c352e6f8eebc1d66a311b0ef3c469187a7a8d88c82973eb2d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41e2b20240a4e66d69d66d93d087087adc82d5ec8df82211c3a3b746696be2a9 +size 3379 diff --git a/data/2025/2504_08xxx/2504.08966/images/c869c2029d490794dbb3bde4a68093b4736da9447da98d5fdbfcfc98533dc48b.jpg b/data/2025/2504_08xxx/2504.08966/images/c869c2029d490794dbb3bde4a68093b4736da9447da98d5fdbfcfc98533dc48b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2117cb66f984bf23ddeaabf976c10f40e09462ed --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/c869c2029d490794dbb3bde4a68093b4736da9447da98d5fdbfcfc98533dc48b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0986eb21c5c91eb6f9ff5ae50519b28b4adb321d31b399565caf1b552a71339 +size 112569 diff --git a/data/2025/2504_08xxx/2504.08966/images/ca4b3f57ad5d24131e2f9e73b023eb898595db73469ca291cc5e95389b3700c9.jpg b/data/2025/2504_08xxx/2504.08966/images/ca4b3f57ad5d24131e2f9e73b023eb898595db73469ca291cc5e95389b3700c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2af02e65a8db9f4634c196bb76988758f7a8f88 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/ca4b3f57ad5d24131e2f9e73b023eb898595db73469ca291cc5e95389b3700c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea892faa3b11023abacec5d6e08a6ba9f05cc0f8b7719c76fc1a338b19d06a3 +size 3082 diff --git a/data/2025/2504_08xxx/2504.08966/images/ca6b341b6269ae763414e7f14233818f87ac7d1254e4de7a7b732a46874d32c5.jpg b/data/2025/2504_08xxx/2504.08966/images/ca6b341b6269ae763414e7f14233818f87ac7d1254e4de7a7b732a46874d32c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..579605c4731099567673aa4992cc4560e1847f13 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/ca6b341b6269ae763414e7f14233818f87ac7d1254e4de7a7b732a46874d32c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2528e106715d7dc7bc2a8ccabbd250acf919711c6f688e5076e8a1ee59bfa1ca +size 4305 diff --git a/data/2025/2504_08xxx/2504.08966/images/d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg b/data/2025/2504_08xxx/2504.08966/images/d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c2c6569244729fd8e8ed3533da4599bbbce22fb --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08966/images/d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b4f2c0dd847ca18f775f92c1b9f205326b09c61ce11115d8f7b5586cde2f29 +size 17041 diff --git a/data/2025/2504_08xxx/2504.08966/images/d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg b/data/2025/2504_08xxx/2504.08966/images/d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23ebbc36e6dd42072b519c1b02104d38e269afe7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52f584cdfae97a3e0b264f37911286b621f2d08af1b0569d6c85e2d2f7f5a2b9 +size 12828 diff --git a/data/2025/2504_08xxx/2504.08966/images/dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg b/data/2025/2504_08xxx/2504.08966/images/dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2c0aee284273ff804c7f3deb9df06cbf9daf019 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f9bd3214ebe32aef8be8961d906b5f4fda7ac5ce675f46914cbf7c82b97a172 +size 13423 diff --git a/data/2025/2504_08xxx/2504.08966/images/e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg b/data/2025/2504_08xxx/2504.08966/images/e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2eae211130c06bbba22e90cbc71f9892937e1277 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bd612f614009db31da359e7895baeb963d8c2ffa451ce7daca7a1a6f8e6e3f08 +size 7091 diff --git a/data/2025/2504_08xxx/2504.08966/images/e643be71e64e169a48e1ff88c1873c8becf45e63affe57e5dc8aded5f62fe043.jpg b/data/2025/2504_08xxx/2504.08966/images/e643be71e64e169a48e1ff88c1873c8becf45e63affe57e5dc8aded5f62fe043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26033589ec382d580ad56bec806b3b5c3bc29612 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/e643be71e64e169a48e1ff88c1873c8becf45e63affe57e5dc8aded5f62fe043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22283eae87aff0a412b63db113d324b020da2eb75937b764fe8bffaab928efb7 +size 2441 diff --git a/data/2025/2504_08xxx/2504.08966/images/e857abe574f9c242fff52f9561bab896c74ed0ef7b6f6d5c54fa38b5589633cd.jpg b/data/2025/2504_08xxx/2504.08966/images/e857abe574f9c242fff52f9561bab896c74ed0ef7b6f6d5c54fa38b5589633cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d58459042e413f57addcecc06007620b38414dc3 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/e857abe574f9c242fff52f9561bab896c74ed0ef7b6f6d5c54fa38b5589633cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bbfb82ad3eafa49ab707f935df29ffc4b1afbd8b91df1739a0b7e467b640477 +size 5051 diff --git a/data/2025/2504_08xxx/2504.08966/images/ed6b232143a7b8525b16cacf76dc3a10e834e1960654e6bfa3d7bf0f470aac2c.jpg b/data/2025/2504_08xxx/2504.08966/images/ed6b232143a7b8525b16cacf76dc3a10e834e1960654e6bfa3d7bf0f470aac2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76d5ace8f9ab1407a56dc38ab7fbc7bc60ffdff4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/ed6b232143a7b8525b16cacf76dc3a10e834e1960654e6bfa3d7bf0f470aac2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db501d9b6fd7161edaaf08fe05ba42fb6f284108e32436cb87900694c4eb53ea +size 86070 diff --git 
a/data/2025/2504_08xxx/2504.08966/images/ef0a6bafa197324b4d9aaa2229b29256100d45828c99fd13ad4dd15f1f16d2b4.jpg b/data/2025/2504_08xxx/2504.08966/images/ef0a6bafa197324b4d9aaa2229b29256100d45828c99fd13ad4dd15f1f16d2b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec6996957fe9c459ec8f31e4e3e4f0ef6bf3b13c --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/ef0a6bafa197324b4d9aaa2229b29256100d45828c99fd13ad4dd15f1f16d2b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:027d10d773f6033d2c703d14411cf6ae73181839ba5c9137e4f5ea02a07ebefa +size 2640 diff --git a/data/2025/2504_08xxx/2504.08966/images/f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg b/data/2025/2504_08xxx/2504.08966/images/f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eca87802c30b18a4a02249b8a8f6e4797401efbf --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b16a4a3654c3693b8293b9f35eb331a72190d5035cde9851e625b7aadfbc07d +size 13519 diff --git a/data/2025/2504_08xxx/2504.08966/images/f8cba66984af80f0f28d03c457da7368c2e0774032fa01236c4883bc43e57b8c.jpg b/data/2025/2504_08xxx/2504.08966/images/f8cba66984af80f0f28d03c457da7368c2e0774032fa01236c4883bc43e57b8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d804e7e47b95d76e8dd5f743d70bf1e9c5e346ad --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/images/f8cba66984af80f0f28d03c457da7368c2e0774032fa01236c4883bc43e57b8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:790d74036402a84b68ddd8b3719085b3dcfeff7d24fa5c7f80c1145d3b089e55 +size 3204 diff --git a/data/2025/2504_08xxx/2504.08966/layout.json b/data/2025/2504_08xxx/2504.08966/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..9fdb509a5b897fdca41e03c27831bd9e112ba152 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08966/layout.json @@ -0,0 +1,19048 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 82, + 102, + 531, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 102, + 531, + 139 + ], + "spans": [ + { + "bbox": [ + 82, + 102, + 531, + 139 + ], + "type": "text", + "content": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 135, + 162, + 225, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 162, + 225, + 175 + ], + "spans": [ + { + "bbox": [ + 135, + 162, + 225, + 175 + ], + "type": "text", + "content": "Mohamed Dhouib" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 175, + 285, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 175, + 285, + 204 + ], + "spans": [ + { + "bbox": [ + 76, + 175, + 285, + 204 + ], + "type": "text", + "content": "LIX, École Polytechnique, IP Paris, France \nmohamed.dhouib@polytechnique.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 149, + 211, + 214, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 211, + 214, + 223 + ], + "spans": [ + { + "bbox": [ + 149, + 211, + 214, + 223 + ], + "type": "text", + "content": "Sonia Vanier" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 224, + 286, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 224, + 286, + 253 + ], + "spans": [ + { + "bbox": [ + 77, + 224, + 286, + 253 + ], + "type": "text", + "content": "LIX, École Polytechnique, IP Paris, France \nsonia.vanier@polytechnique.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 380, + 162, + 462, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 162, + 462, + 175 + ], + "spans": [ + { + "bbox": [ + 380, + 
162, + 462, + 175 + ], + "type": "text", + "content": "Davide Buscaldi" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 176, + 534, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 176, + 534, + 202 + ], + "spans": [ + { + "bbox": [ + 309, + 176, + 534, + 202 + ], + "type": "text", + "content": "LIPN, Université Sorbonne Paris Nord, France davide.buscaldi@lipn.univ-paris13.fr" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 384, + 211, + 459, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 211, + 459, + 224 + ], + "spans": [ + { + "bbox": [ + 384, + 211, + 459, + 224 + ], + "type": "text", + "content": "Aymen Shabou" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 225, + 533, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 225, + 533, + 238 + ], + "spans": [ + { + "bbox": [ + 310, + 225, + 533, + 238 + ], + "type": "text", + "content": "DataLab Groupe, Crédit Agricole S.A, France" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 328, + 241, + 512, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 241, + 512, + 251 + ], + "spans": [ + { + "bbox": [ + 328, + 241, + 512, + 251 + ], + "type": "text", + "content": "aymen.shabou@credit-agricole-sa.fr" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 152, + 281, + 200, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 281, + 200, + 293 + ], + "spans": [ + { + "bbox": [ + 152, + 281, + 200, + 293 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 306, + 297, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 306, + 297, + 510 + ], + "spans": [ + { + "bbox": [ + 55, + 306, + 297, + 510 + ], + "type": "text", + "content": "Visual Language Models require substantial computational resources for inference due to the additional input tokens needed to 
represent visual information. However, these visual tokens often contain redundant and unimportant information, resulting in an unnecessarily high number of tokens. To address this, we introduce PACT, a method that reduces inference time and memory usage by pruning irrelevant tokens and merging visually redundant ones at an early layer of the language model. Our approach uses a novel importance metric to identify unimportant tokens without relying on attention scores, making it compatible with FlashAttention. We also propose a novel clustering algorithm, called Distance Bounded Density Peak Clustering, which efficiently clusters visual tokens while constraining the distances between elements within a cluster by a predefined threshold. We demonstrate the effectiveness of PACT through extensive experiments." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 536, + 135, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 536, + 135, + 550 + ], + "spans": [ + { + "bbox": [ + 56, + 536, + 135, + 550 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 557, + 297, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 557, + 297, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 557, + 297, + 714 + ], + "type": "text", + "content": "Extending Large language models to modalities other than text [11, 18, 19, 55, 56] has seen success in recent years across various domains, especially in the visual domain with models like LLaVA [31] and Qwen-VL [4]. State-of-the-art Visual Language Models generally consist of three main components: a vision encoder, a connector, and a language model. The vision encoder converts input images into visual tokens, which are passed through the connector and then fed to the language model along with the input text. 
While this architecture has shown impressive performance across different tasks, it suffers from high computational cost due to the large number of visual tokens. In this paper, we introduce two complementary methods to" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 281, + 555, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 555, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 555, + 567 + ], + "type": "text", + "content": "optimize Visual Language Models by reducing inference time and memory requirements: a pruning module and a clustering algorithm. These methods can be used independently or combined, forming the PACT approach for greater effectiveness. Notably, our pruning and clustering modules, as well as PACT, are applied at inference time and thus require no additional training. The pruning module identifies unimportant visual tokens based on a novel importance metric that evaluates each token's relevance without relying on attention scores. This makes it compatible with FlashAttention [12], as FlashAttention does not support the calculation of attention scores. The second module introduces a novel clustering algorithm, Distance Bounded Density Peak Clustering (DBDPC), which clusters visual tokens while ensuring that the distances between elements within a cluster are constrained by a predefined threshold. By combining these two methods, we develop PACT. First, the pruning module eliminates unimportant tokens, then the DBDPC algorithm clusters the remaining ones. Tokens that were initially pruned but are sufficiently close to the constructed clusters are reincorporated, ensuring that valuable information from the pruned tokens is recovered. Finally, the tokens within each cluster are merged into a single representative token, reducing the total token count." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "content": "By combining both pruning and clustering, PACT achieves an effective visual token reduction, addressing both irrelevant and redundant tokens. When applied to LLaVA-OneVision-7B, PACT achieves a " + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "content": " visual token reduction with negligible performance loss. Moreover, PACT exhibits significantly less performance degradation at higher reduction ratios compared to previous methods, achieving " + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "inline_equation", + "content": "71.3\\%" + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "content": " visual token reduction ratio with only " + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "inline_equation", + "content": "1.4\\%" + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "content": " performance drop, whereas previous state-of-the-art methods show at best a " + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "inline_equation", + "content": "4.4\\%" + }, + { + "bbox": [ + 313, + 568, + 556, + 700 + ], + "type": "text", + "content": " performance drop at an equal reduction ratio. 
Our contributions are as follows:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 701, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 701, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 701, + 555, + 714 + ], + "type": "text", + "content": "- We propose a novel visual token pruning metric that does" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.08966v1 [cs.CV] 11 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 63, + 72, + 294, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 63, + 72, + 294, + 95 + ], + "type": "text", + "content": "not rely on attention scores, ensuring compatibility with FlashAttention, and empirically validate its effectiveness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 228 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 56, + 96, + 295, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 132 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 132 + ], + "type": "text", + "content": "- We introduce a new clustering algorithm aimed at reducing visual redundancy and show its superiority over other clustering algorithms for visual token reduction." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 133, + 295, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 133, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 56, + 133, + 295, + 228 + ], + "type": "text", + "content": "- We show that combining pruning with clustering-based merging surpasses either technique alone for visual token reduction. By integrating our pruning and clustering algorithms, we propose a novel approach, PACT, and demonstrate that it outperforms previous and concurrent works [3, 6, 9, 30, 44]. The codebase used to obtain the results in this study is available at https://github.com/orailix/PACT/tree/main." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 240, + 140, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 240, + 140, + 253 + ], + "spans": [ + { + "bbox": [ + 55, + 240, + 140, + 253 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 261, + 188, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 261, + 188, + 274 + ], + "spans": [ + { + "bbox": [ + 55, + 261, + 188, + 274 + ], + "type": "text", + "content": "2.1. Visual language models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "text", + "content": "Since the introduction of BLIP-2 [28], the use of a visual encoder followed by a connector that feeds visual vectors to the language model has become the standard architecture for Visual Language Models (VLMs) [7, 17, 50]. Recent models [10, 27, 49] have enhanced VLM architecture with high-resolution handling, which is necessary for document understanding tasks [13, 23]. 
LLaVA-OneVision [27] divides images into " + }, + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "inline_equation", + "content": "384 \\times 384" + }, + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "text", + "content": " crops, encodes each part with SigLIP [54], and uses bilinear interpolation to reduce token count up to 8,748 tokens. InternVL2 [10] splits images into " + }, + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "inline_equation", + "content": "448 \\times 448" + }, + { + "bbox": [ + 55, + 278, + 295, + 507 + ], + "type": "text", + "content": " tiles, processing up to 40 tiles per image with InternViT [10], and applies pixel shuffle to reduce the number of visual tokens, producing up to 10,240 tokens. Qwen-VL2 [49] uses 2D Rotary Positional Embeddings for dynamic resolution support and merges adjacent tokens via an MLP layer, yet still requires over 10,000 tokens for high resolution images. While these models apply token reduction by merging adjacent tokens to preserve structure, they do not address token irrelevance or redundancy, limiting efficiency." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 516, + 185, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 516, + 185, + 528 + ], + "spans": [ + { + "bbox": [ + 55, + 516, + 185, + 528 + ], + "type": "text", + "content": "2.2. Visual token reduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 534, + 295, + 713 + ], + "type": "text", + "content": "Reducing the number of visual tokens in Vision Transformers (ViT) has been a key focus of the research community for several years. EViT [29] identifies and merges irrelevant tokens by relying on the attention scores between the class token ([CLS]) and visual tokens. 
ToME [6] proposed a simple yet effective approach that iteratively merges similar tokens throughout the ViT layers. Building on these ideas, recent efforts have extended visual token reduction techniques to VLMs. LaVIT [21] used the Gumbel-Softmax [20] to train a mask that selects tokens for retention, merging discarded tokens into retained ones via additional attention layers. LLaVA-PruMerge [44] accelerates LLAVA 1.5 [31] by leveraging the attention scores between the [CLS] token and visual tokens in the last layer of the ViT encoder to decide which tokens to retain, while HiRED [3] refines" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 555, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 468 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 468 + ], + "type": "text", + "content": "this approach by allocating token budgets based on attention from earlier layers. However, both these methods are only applicable to architectures where a ViT is used and a [CLS] token is added to the input visual sequence, making them incompatible with the majority state-of-the-art VLMs, which do not use a [CLS] token. Moreover, both methods attribute scores to tokens at the output of the visual encoder, but recent VLMs merge adjacent visual tokens before passing them to the language model. It is unclear how to attribute pre-merging scores to the resulting tokens, making LLaVA-PruMerge and HiRED inapplicable. We note that LLaVA-PruMerge mitigates information loss by merging pruned tokens with retained ones. However, it does not merge similar retained tokens; therefore, it does not address visual redundancy, a typical limitation of pruning-based methods. TRIM [46] prunes tokens based on similarity with pooled text from CLIP [42]. 
However, as TRIM relies on textual information for pruning, it is less suitable for multi-turn conversations where, in practice, visual tokens would be pruned solely based on the text information available during the image's forward pass, potentially losing crucial information required to answer subsequent prompts. FastV [9] evaluates token importance via average attention scores, which is not compatible with FlashAttention, adding computational overhead for recent VLMs. VTW [30] removes tokens in deeper layers. While this method shows promising results, its reduction of computational costs is limited as visual tokens are only withdrawn in later layers. These previous methods address only one of two issues: the presence of unimportant tokens or visual redundancy. In this work, we introduce PACT, a novel approach that tackles both issues simultaneously by pruning irrelevant tokens and merging visually redundant ones." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 478, + 370, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 478, + 370, + 490 + ], + "spans": [ + { + "bbox": [ + 314, + 478, + 370, + 490 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": "In this section, we present PACT, a method that aims to reduce VLMs inference time and memory usage by pruning unimportant tokens and merging visually redundant ones at an early layer " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " of the language model. PACT consists of three steps: First, unimportant tokens are identified. Next, the remaining tokens are clustered. 
Finally, tokens in each cluster, along with sufficiently close tokens that were initially discarded, are merged. PACT operates within a selected layer " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " of the language model and is applicable in scenarios where visual tokens are fed into the language model, regardless of the architecture of the visual encoder or connector. The three-step process of PACT is illustrated in Figure 1. We denote the hidden states at layer " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " is the number of visual tokens and " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " is the dimensionality of the hidden states. 
We denote by " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " the key and query matrices for the visual tokens at layer " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "n_h" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " represents the number of attention heads and " + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "inline_equation", + "content": "d_h" + }, + { + "bbox": [ + 313, + 498, + 556, + 713 + ], + "type": "text", + "content": " is the dimensionality of each attention heads." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 70, + 529, + 226 + ], + "blocks": [ + { + "bbox": [ + 82, + 70, + 529, + 226 + ], + "lines": [ + { + "bbox": [ + 82, + 70, + 529, + 226 + ], + "spans": [ + { + "bbox": [ + 82, + 70, + 529, + 226 + ], + "type": "image", + "image_path": "c055f89776f93fc24bbcc1f8f24389d305be1809f3b8c99f2fb4443a656c9543.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 233, + 555, + 278 + ], + "lines": [ + { + "bbox": [ + 55, + 233, + 555, + 278 + ], + "spans": [ + { + "bbox": [ + 55, + 233, + 555, + 
278 + ], + "type": "text", + "content": "Figure 1. Simplified illustration of PACT. This figure illustrates the three-step process of PACT: (1) First, EUTI is used to prune visual tokens deemed unimportant; (2) Then, DBDPC is applied to cluster the remaining tokens, ensuring that the distance between each token and its corresponding cluster center is smaller than the cutoff distance; (3) Finally, initially pruned tokens that are close to cluster centers are reintegrated, and the elements within each cluster are merged to form the reduced set of visual tokens." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 298, + 135, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 298, + 135, + 310 + ], + "spans": [ + { + "bbox": [ + 56, + 298, + 135, + 310 + ], + "type": "text", + "content": "Algorithm 1 EUTI" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 315, + 294, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 315, + 294, + 327 + ], + "spans": [ + { + "bbox": [ + 56, + 315, + 294, + 327 + ], + "type": "text", + "content": "Input: Hidden states " + }, + { + "bbox": [ + 56, + 315, + 294, + 327 + ], + "type": "inline_equation", + "content": "\\mathbf{H} \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 56, + 315, + 294, + 327 + ], + "type": "text", + "content": "; key and query matrices" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 327, + 269, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 327, + 269, + 339 + ], + "spans": [ + { + "bbox": [ + 66, + 327, + 269, + 339 + ], + "type": "inline_equation", + "content": "\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}" + }, + { + "bbox": [ + 66, + 327, + 269, + 339 + ], + "type": "text", + "content": "; pruning percentage " + }, + { + "bbox": [ + 66, + 327, + 269, + 339 + ], + "type": "inline_equation", + 
"content": "\\lambda \\in [0,1]" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 340, + 289, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 340, + 289, + 351 + ], + "spans": [ + { + "bbox": [ + 56, + 340, + 289, + 351 + ], + "type": "text", + "content": "Output: Sets of important and unimportant visual tokens" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 352, + 242, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 352, + 242, + 363 + ], + "spans": [ + { + "bbox": [ + 66, + 352, + 242, + 363 + ], + "type": "text", + "content": "Step 1: Calculate the global query vector" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 363, + 154, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 363, + 154, + 376 + ], + "spans": [ + { + "bbox": [ + 66, + 363, + 154, + 376 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i}", + "image_path": "7ce80ee93a2ab17492c24a1274c249473d3b46d1c880f0d1a9e7ef8feed27c38.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 376, + 295, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 376, + 295, + 398 + ], + "spans": [ + { + "bbox": [ + 66, + 376, + 295, + 398 + ], + "type": "text", + "content": "Step 2: Compute the importance score for each visual token" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 399, + 159, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 399, + 159, + 411 + ], + "spans": [ + { + "bbox": [ + 66, + 399, + 159, + 411 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 66, + 399, + 159, + 411 + ], + "type": "inline_equation", + "content": "i = 1,\\dots ,n" + }, + { + "bbox": [ + 66, + 399, + 159, + 411 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 
82, + 411, + 274, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 411, + 274, + 426 + ], + "spans": [ + { + "bbox": [ + 82, + 411, + 274, + 426 + ], + "type": "interline_equation", + "content": "s _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\left\\| \\mathbf {h} _ {i} \\right\\| _ {2}", + "image_path": "bdb0a995ec5a4a7c8dbfa738af92a7dacabf794778b4bab1d63f6e08e0d172f5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 66, + 426, + 99, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 426, + 99, + 435 + ], + "spans": [ + { + "bbox": [ + 66, + 426, + 99, + 435 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 437, + 295, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 437, + 295, + 459 + ], + "spans": [ + { + "bbox": [ + 66, + 437, + 295, + 459 + ], + "type": "text", + "content": "Step 3: Define sets of important and unimportant tokens" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 460, + 220, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 460, + 220, + 472 + ], + "spans": [ + { + "bbox": [ + 66, + 460, + 220, + 472 + ], + "type": "interline_equation", + "content": "S _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\}", + "image_path": "45e47e889cc760e7fb27c04188b2e788abf6954758da375246dc922a85502bc6.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 472, + 227, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 472, + 227, + 485 + ], + "spans": [ + { + "bbox": [ + 67, + 472, + 227, + 485 + ], + "type": "interline_equation", + "content": "S _ {\\text {u n i m p o r t a n t}} 
= \\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\}", + "image_path": "a811ec0995145ac4a8f871a4191fb5c4059c7f1ed7e6c0aa2b6dbe01c9915d01.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "spans": [ + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "type": "text", + "content": "Return " + }, + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "type": "inline_equation", + "content": "S_{\\text{important}}" + }, + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 66, + 485, + 182, + 498 + ], + "type": "inline_equation", + "content": "S_{\\text{unimportant}}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "text", + "content": "tion head. For simplicity, we omit the layer index in the notation. We denote the position index of a token by a subscript, while the attention head is indicated by a superscript. For instance, " + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "inline_equation", + "content": "\\mathbf{k}_i^{(j)}" + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "text", + "content": " represents the key vector corresponding to the " + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "text", + "content": "-th visual token and the " + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 55, + 525, + 295, + 586 + ], + "type": "text", + "content": "-th attention head." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 598, + 237, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 598, + 237, + 611 + ], + "spans": [ + { + "bbox": [ + 55, + 598, + 237, + 611 + ], + "type": "text", + "content": "3.1. Unimportant tokens identification" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 618, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 713 + ], + "type": "text", + "content": "A straightforward approach to identifying unimportant tokens at a certain layer " + }, + { + "bbox": [ + 55, + 618, + 295, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 618, + 295, + 713 + ], + "type": "text", + "content": " of the used language model is to define the importance of each token as the total attention score that a given token receives from all other tokens [9]. However, this method has three main drawbacks. First, current VLMs utilize FlashAttention [12], which does not support outputting attention scores. Secondly, attention scores are computed with masking, which introduces" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 299, + 555, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 299, + 555, + 502 + ], + "spans": [ + { + "bbox": [ + 313, + 299, + 555, + 502 + ], + "type": "text", + "content": "biases. Tokens at the end of a sequence tend to receive lower average attention scores since fewer tokens attend to them. Calculating the average attention score for each token based solely on the tokens that attend to it can mitigate this masking effect but introduces a new bias: end-of-sequence tokens may exhibit higher scores as they receive attention mainly from nearby tokens. This leads to either earlier or later tokens being pruned more frequently, as shown in Fig. 2. 
Such positional bias should be avoided, as pruning should depend solely on the information that visual tokens hold, not their position. Finally, relying only on keys and queries at a single layer to determine an importance metric may fail to fully capture the significance of visual tokens across all layers of the language model, mainly because each self-attention layer focuses on different aspects of the visual tokens. To address this, we propose an importance metric that incorporates the accumulated in" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 321, + 527, + 422, + 590 + ], + "blocks": [ + { + "bbox": [ + 321, + 527, + 422, + 590 + ], + "lines": [ + { + "bbox": [ + 321, + 527, + 422, + 590 + ], + "spans": [ + { + "bbox": [ + 321, + 527, + 422, + 590 + ], + "type": "image", + "image_path": "080873109efc60b65c42f2aecfac3032a68e1da59662837163d12b46e5cb4846.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 601, + 433, + 622 + ], + "lines": [ + { + "bbox": [ + 315, + 601, + 433, + 622 + ], + "spans": [ + { + "bbox": [ + 315, + 601, + 433, + 622 + ], + "type": "text", + "content": "(a) Average attention scores as a function of Position IDs." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 443, + 528, + 542, + 590 + ], + "blocks": [ + { + "bbox": [ + 443, + 528, + 542, + 590 + ], + "lines": [ + { + "bbox": [ + 443, + 528, + 542, + 590 + ], + "spans": [ + { + "bbox": [ + 443, + 528, + 542, + 590 + ], + "type": "image", + "image_path": "29dbc9ef9e90d1c096ca3c827f33d88639e68bf65ca8cf013f9381ab98854e7a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 601, + 553, + 633 + ], + "lines": [ + { + "bbox": [ + 435, + 601, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 435, + 601, + 553, + 633 + ], + "type": "text", + "content": "(b) Average attention scores relative to non-masked tokens as a function of Position IDs." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 313, + 644, + 555, + 710 + ], + "lines": [ + { + "bbox": [ + 313, + 644, + 555, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 644, + 555, + 710 + ], + "type": "text", + "content": "Figure 2. Illustration of the bias induced by the use of the average attention scores across visual tokens as a pruning metric. In (a), averaging attention over all tokens favors earlier tokens, leading to pruning later tokens more frequently. In (b), averaging only over attending tokens reverses the bias, leading to earlier tokens being pruned more often." 
+ } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 72, + 286, + 202 + ], + "blocks": [ + { + "bbox": [ + 65, + 72, + 286, + 202 + ], + "lines": [ + { + "bbox": [ + 65, + 72, + 286, + 202 + ], + "spans": [ + { + "bbox": [ + 65, + 72, + 286, + 202 + ], + "type": "image", + "image_path": "d09f70419ec32d57ee282deb3099146e4993cb5122244a5ca35c44d6d395531b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 214, + 295, + 237 + ], + "lines": [ + { + "bbox": [ + 55, + 214, + 295, + 237 + ], + "spans": [ + { + "bbox": [ + 55, + 214, + 295, + 237 + ], + "type": "text", + "content": "Figure 3. Illustration of visual token norm statistics at the fourth layer of LLaVA-OneVision-7B." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "spans": [ + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "text", + "content": "formation from the hidden states and the layer-specific information from the keys and queries at an early layer " + }, + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "text", + "content": ". We refer to this method as Efficient Unimportant Tokens Identification (EUTI). 
We speculate that the norm of hidden states can provide critical information about the importance of each visual token, as they reflect how much information a particular token carries through the network. Figure 3 presents statistics on the hidden state norms of visual tokens at the fourth layer of LLaVA-OneVision-7B, indicating a high variance. This variance suggests that certain visual tokens accumulate more information through residual connections and may therefore be more important for subsequent calculations. To leverage information from both hidden state norms and the key and query vectors, we first compute a global query vector " + }, + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}_{\\mathrm{global}}" + }, + { + "bbox": [ + 54, + 255, + 296, + 448 + ], + "type": "text", + "content": " as the average of all query vectors across visual tokens:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 134, + 453, + 295, + 484 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 453, + 295, + 484 + ], + "spans": [ + { + "bbox": [ + 134, + 453, + 295, + 484 + ], + "type": "interline_equation", + "content": "\\mathbf {Q} _ {\\text {g l o b a l}} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\mathbf {Q} _ {i} \\tag {1}", + "image_path": "a8edc535de3dc0f5b92d00e7c80d2795654c78738c1f24f8ea1e73fdddad7eb1.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 490, + 295, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 490, + 295, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 490, + 295, + 514 + ], + "type": "text", + "content": "This vector represents the overall query information requested by visual tokens at layer " + }, + { + "bbox": [ + 55, + 490, + 295, + 514 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 490, + 295, + 514 + ], + "type": "text", + "content": " across all attention" + } + ] + } + ], + "index": 4 + 
}, + { + "type": "image", + "bbox": [ + 66, + 530, + 286, + 666 + ], + "blocks": [ + { + "bbox": [ + 66, + 530, + 286, + 666 + ], + "lines": [ + { + "bbox": [ + 66, + 530, + 286, + 666 + ], + "spans": [ + { + "bbox": [ + 66, + 530, + 286, + 666 + ], + "type": "image", + "image_path": "66838683c6d4d826816b766acbd7ffa79eb6b730ce42f44926fc5a77a836db69.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 677, + 295, + 711 + ], + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 711 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 711 + ], + "type": "text", + "content": "Figure 4. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-OneVision-7B before the application of rotary embeddings." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 554, + 144 + ], + "type": "text", + "content": "heads. The importance score for each visual token is then computed by first taking the dot product between its key and the global query for each attention head. A softmax is applied across visual tokens within each attention head, followed by averaging across attention heads. 
The final score is obtained by scaling the result with the hidden state norm:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 341, + 159, + 554, + 194 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 159, + 554, + 194 + ], + "spans": [ + { + "bbox": [ + 341, + 159, + 554, + 194 + ], + "type": "interline_equation", + "content": "s _ {i} = \\frac {1}{n _ {h}} \\sum_ {j = 1} ^ {n _ {h}} \\operatorname {S o f t m a x} \\left(\\mathbf {k} _ {i} ^ {(j)} \\cdot \\mathbf {Q} _ {\\text {g l o b a l}} ^ {(j)}\\right) \\cdot \\| \\mathbf {h} _ {i} \\| _ {2} \\tag {2}", + "image_path": "4201dae317b57c360062832ecc40a760bf294c48a9a5ff0abb50402cd4d541d6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 198, + 554, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 198, + 554, + 245 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 554, + 245 + ], + "type": "text", + "content": "Then, we divide the visual tokens into important and unimportant tokens, using a parameter " + }, + { + "bbox": [ + 313, + 198, + 554, + 245 + ], + "type": "inline_equation", + "content": "\\lambda \\in [0,1]" + }, + { + "bbox": [ + 313, + 198, + 554, + 245 + ], + "type": "text", + "content": " to control the percentage of tokens deemed unimportant. 
The two sets are defined as follows:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 355, + 253, + 555, + 268 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 253, + 555, + 268 + ], + "spans": [ + { + "bbox": [ + 355, + 253, + 555, + 268 + ], + "type": "interline_equation", + "content": "S _ {\\text {i m p o r t a n t}} = \\left\\{i \\mid s _ {i} \\geq \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {3}", + "image_path": "4be4dc214e5ed43b8328e96d9a6f8783868e8f810f442012991dcd739685a581.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 353, + 272, + 554, + 286 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 272, + 554, + 286 + ], + "spans": [ + { + "bbox": [ + 353, + 272, + 554, + 286 + ], + "type": "interline_equation", + "content": "S _ {\\text {u n i m p o r t a n t}} = \\left\\{i \\mid s _ {i} < \\text {P e r c e n t i l e} (s, \\lambda) \\right\\} \\tag {4}", + "image_path": "e857abe574f9c242fff52f9561bab896c74ed0ef7b6f6d5c54fa38b5589633cd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 289, + 554, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 289, + 554, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 289, + 554, + 373 + ], + "type": "text", + "content": "Unimportant tokens can be pruned, or the resulting sets can be combined with a clustering algorithm to further reduce the number of visual tokens, as we will show in the next section. The full EUTI algorithm is illustrated in Algorithm 1. We note that in the case where Rotary Embeddings are used [47], we use the keys and queries before their application to avoid any positional bias." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 379, + 534, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 379, + 534, + 392 + ], + "spans": [ + { + "bbox": [ + 313, + 379, + 534, + 392 + ], + "type": "text", + "content": "3.2. Clustering-based merging of visual tokens" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 396, + 554, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 554, + 492 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 554, + 492 + ], + "type": "text", + "content": "Distance Bounded Density Peak Clustering Relying solely on the importance scores presented above to prune unimportant tokens can lead to a significant reduction in visual tokens, retaining only important ones. However, redundant information may still be present across retained visual tokens. Therefore, we propose merging the redundant visual tokens using a clustering algorithm. We desire our clustering algorithm to have the following characteristics:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 494, + 553, + 529 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 314, + 494, + 440, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 494, + 440, + 505 + ], + "spans": [ + { + "bbox": [ + 314, + 494, + 440, + 505 + ], + "type": "text", + "content": "(a) Low computational time." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 506, + 553, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 553, + 529 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 553, + 529 + ], + "type": "text", + "content": "(b) Avoid assigning points that are far from each other, in terms of feature similarity, into the same cluster." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 315, + 594, + 554, + 632 + ], + "blocks": [ + { + "bbox": [ + 313, + 540, + 554, + 584 + ], + "lines": [ + { + "bbox": [ + 313, + 540, + 554, + 584 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 554, + 584 + ], + "type": "text", + "content": "Table 1. Throughput ratio, reduction ratio, and GPU memory usage for PACT, FastV, VTW, and ToME applied to LLaVA-OneVision-7B. Results are reported at a " + }, + { + "bbox": [ + 313, + 540, + 554, + 584 + ], + "type": "inline_equation", + "content": "98.6\\%" + }, + { + "bbox": [ + 313, + 540, + 554, + 584 + ], + "type": "text", + "content": " Approach-to-Reference Metric Ratio." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 594, + 554, + 632 + ], + "lines": [ + { + "bbox": [ + 315, + 594, + 554, + 632 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 554, + 632 + ], + "type": "table", + "html": "
No reductionPACT (ours)FastVVTWToME
Reduction Ratio0%71.3%50%25%40%
LLM Throughput Ratio100%225%165%160%137%
GPU Maximum Memory Consumption (GB)27.419.0530.419.221.4
", + "image_path": "8cef1152cfc8f46979a833a4e4fbf5773ece6fb604250f4f1f1cd8d86b2b607f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 554, + 713 + ], + "type": "text", + "content": "Condition (b) ensures that outliers are not assigned to distant cluster centers, as we speculate that these outliers contain important information and should only be merged with nearby outliers or remain as single points in separate clusters. Condition (b) also guarantees that points in each cluster will be relatively close to each other, which minimizes" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 191 + ], + "type": "text", + "content": "information loss when assigning a single vector as their representative. The Density Peaks Clustering (DPC) algorithm [5] is appealing in this context because it satisfies condition (a), unlike iterative clustering algorithms like k-means [2]. However, DPC does not satisfy condition (b) as it can form large clusters where boundary points may be distant from each other. The same issue arises with other algorithms such as DBSCAN [14]. Therefore, we propose a new clustering algorithm, which we call Distance Bounded Density Peaks Clustering (DBDPC)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": "DBDPC takes as input a set of vectors " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{u}_i\\in \\mathbb{R}^{d_1}\\}_{i = 1}^q" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "q,d_{1}\\in \\mathbb{N}^{+}" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": ", and outputs a set of clusters. Our algorithm's output depends on two parameters, the cutoff distance " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "d_c\\in \\mathbb{R}^+" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": " and a normalization factor " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "d_{n}\\in \\mathbb{R}^{+}" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": ", as well as a distance function " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "d:\\mathbb{R}^{d_1}\\times \\mathbb{R}^{d_1}\\to \\mathbb{R}^+" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": ". 
We define the distance between two vectors " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_j" + }, + { + "bbox": [ + 55, + 192, + 296, + 266 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 101, + 274, + 296, + 299 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 274, + 296, + 299 + ], + "spans": [ + { + "bbox": [ + 101, + 274, + 296, + 299 + ], + "type": "interline_equation", + "content": "d _ {i j} = d \\left(\\mathbf {u} _ {i}, \\mathbf {u} _ {j}\\right) = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ {2} \\| \\mathbf {u} _ {j} \\| _ {2}} \\tag {5}", + "image_path": "1d2035a61913e11271265d7f4b0ef9642686ee68ebb89cf85ac6730e150f5f63.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 308, + 225, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 308, + 225, + 319 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 225, + 319 + ], + "type": "text", + "content": "Then the local density " + }, + { + "bbox": [ + 55, + 308, + 225, + 319 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 55, + 308, + 225, + 319 + ], + "type": "text", + "content": " is calculated as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 137, + 330, + 295, + 356 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 330, + 295, + 356 + ], + "spans": [ + { + "bbox": [ + 137, + 330, + 295, + 356 + ], + "type": "interline_equation", + "content": "\\rho_ {i} = \\sum_ {j} e ^ {- d _ {i j} / d _ {n}} \\tag {6}", + "image_path": "c02e3429e73e82093f3754d16330140b4903f4ca16ac239e9651f581911fbed5.jpg" + } + ] + } + 
], + "index": 4 + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": "We process the " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": " vectors from highest to lowest " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": " values and designate a vector as a cluster center if its minimum distance from already selected centers is greater than " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": ". Each vector " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": " is then assigned to the cluster of the closest center. Our algorithm guarantees that the distance from each vector to its cluster center is less than " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": ", thereby satisfying condition (b) stated above. The full DBDPC algorithm is detailed in Algorithm 2. 
The center identification process in DBDPC ensures that inter-cluster distances are upper-bounded by " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "2d_c \\times (2 - d_c)" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": " while distances between cluster centers are lower-bounded by " + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 55, + 365, + 296, + 628 + ], + "type": "text", + "content": ", which we formally prove in Appendix B. We note that several parts of our algorithm are presented as for-loops for clarity. However, all computations are parallelizable on GPU, as there are no dependencies between the elements of each loop, except for the part where we select cluster centers. For this part, we use a recursive algorithm that efficiently identifies an initial set of centers and discarded vectors, thereby reducing the number of vectors to be processed. We explain this in detail in Appendix D. For a comparison between DBDPC and DPC, as well as a qualitative comparison with other clustering algorithms, refer to Appendix C." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 630, + 295, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 640 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 640 + ], + "type": "text", + "content": "Which vectors should be used for distance calculation?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 642, + 296, + 713 + ], + "type": "text", + "content": "As previously discussed, the DBDPC algorithm operates on a set of vectors that are used for distance calculation. 
To achieve effective clustering, the dot product between these vectors needs to accurately reflect the similarity between the corresponding visual tokens. Fortunately, transformers address this issue through the QKV self-attention mechanism." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 72, + 406, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 406, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 406, + 83 + ], + "type": "text", + "content": "Algorithm 2 DBDPC" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "spans": [ + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "text", + "content": "Input: Cutoff distance " + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "inline_equation", + "content": "d_{c} \\in \\mathbb{R}^{+}" + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "text", + "content": ", normalization factor " + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "inline_equation", + "content": "d_{n} \\in \\mathbb{R}^{+}" + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "text", + "content": ", set of vectors " + }, + { + "bbox": [ + 314, + 88, + 555, + 113 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{u}_i \\in \\mathbb{R}^{d_1}\\}_{i=1}^q" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 113, + 554, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 113, + 554, + 136 + ], + "spans": [ + { + "bbox": [ + 314, + 113, + 554, + 136 + ], + "type": "text", + "content": "Output: Cluster center indices " + }, + { + "bbox": [ + 314, + 113, + 554, + 136 + ], + "type": "inline_equation", + "content": "C_{\\text{centers}}" + }, + { + "bbox": [ + 314, + 113, + 554, + 136 + ], + "type": "text", + "content": ", element indices in each cluster " + }, + { + "bbox": [ + 314, + 113, + 
554, + 136 + ], + "type": "inline_equation", + "content": "C_{\\text{elements}}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 325, + 137, + 423, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 137, + 423, + 148 + ], + "spans": [ + { + "bbox": [ + 325, + 137, + 423, + 148 + ], + "type": "text", + "content": "for all pairs " + }, + { + "bbox": [ + 325, + 137, + 423, + 148 + ], + "type": "inline_equation", + "content": "(\\mathbf{u}_i,\\mathbf{u}_j)" + }, + { + "bbox": [ + 325, + 137, + 423, + 148 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 340, + 148, + 427, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 148, + 427, + 163 + ], + "spans": [ + { + "bbox": [ + 340, + 148, + 427, + 163 + ], + "type": "interline_equation", + "content": "d _ {i j} = 1 - \\frac {\\mathbf {u} _ {i} \\cdot \\mathbf {u} _ {j}}{\\| \\mathbf {u} _ {i} \\| _ {2} \\| \\mathbf {u} _ {j} \\| _ {2}}", + "image_path": "472e9c158d3825557b8be34db4c4d41ffe133f00886c06e9eb954a9bb53e45d5.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 325, + 163, + 358, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 163, + 358, + 171 + ], + "spans": [ + { + "bbox": [ + 325, + 163, + 358, + 171 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 325, + 174, + 410, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 174, + 410, + 185 + ], + "spans": [ + { + "bbox": [ + 325, + 174, + 410, + 185 + ], + "type": "text", + "content": "for all vectors " + }, + { + "bbox": [ + 325, + 174, + 410, + 185 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 325, + 174, + 410, + 185 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 340, + 185, + 424, + 199 + ], + "type": "interline_equation", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 340, + 185, + 424, + 199 + ], + "spans": [ + { + "bbox": [ + 340, + 185, + 424, + 199 + ], + "type": "interline_equation", + "content": "\\rho_ {i} = \\sum_ {j = 1} ^ {q} e ^ {- d _ {i j} / d _ {n}}", + "image_path": "ef0a6bafa197324b4d9aaa2229b29256100d45828c99fd13ad4dd15f1f16d2b4.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 325, + 199, + 358, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 199, + 358, + 207 + ], + "spans": [ + { + "bbox": [ + 325, + 199, + 358, + 207 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "spans": [ + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "type": "text", + "content": "Sort vectors by " + }, + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "type": "text", + "content": " in descending order, obtaining indices " + }, + { + "bbox": [ + 325, + 209, + 553, + 233 + ], + "type": "inline_equation", + "content": "[i_1,i_2,\\dots ,i_q]" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 325, + 233, + 504, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 233, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 325, + 233, + 504, + 245 + ], + "type": "text", + "content": "Initialize " + }, + { + "bbox": [ + 325, + 233, + 504, + 245 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}} = \\{i_1\\}" + }, + { + "bbox": [ + 325, + 233, + 504, + 245 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{elements}} = \\{i_1:\\emptyset \\}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 325, + 246, + 468, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 246, + 468, + 256 + ], + "spans": [ 
+ { + "bbox": [ + 325, + 246, + 468, + 256 + ], + "type": "text", + "content": "for all indices " + }, + { + "bbox": [ + 325, + 246, + 468, + 256 + ], + "type": "inline_equation", + "content": "i_k" + }, + { + "bbox": [ + 325, + 246, + 468, + 256 + ], + "type": "text", + "content": " in sorted order do" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 340, + 258, + 460, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 258, + 460, + 270 + ], + "spans": [ + { + "bbox": [ + 340, + 258, + 460, + 270 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 340, + 258, + 460, + 270 + ], + "type": "inline_equation", + "content": "\\min_{s\\in C_{\\mathrm{centers}}}d_{i_ks} > d_c" + }, + { + "bbox": [ + 340, + 258, + 460, + 270 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 355, + 270, + 454, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 270, + 454, + 281 + ], + "spans": [ + { + "bbox": [ + 355, + 270, + 454, + 281 + ], + "type": "interline_equation", + "content": "C _ {\\text {c e n t e r s}} = C _ {\\text {c e n t e r s}} \\cup \\left\\{i _ {k} \\right\\}", + "image_path": "ca4b3f57ad5d24131e2f9e73b023eb898595db73469ca291cc5e95389b3700c9.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 356, + 281, + 421, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 281, + 421, + 293 + ], + "spans": [ + { + "bbox": [ + 356, + 281, + 421, + 293 + ], + "type": "interline_equation", + "content": "C _ {\\text {e l e m e n t s}} \\left[ i _ {k} \\right] = \\emptyset", + "image_path": "49cba0de526e2f3b78888f22f65f36bd56c061de82eb1176d616290f4294a924.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 340, + 294, + 367, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 294, + 367, + 303 + ], + "spans": [ + { + "bbox": [ + 340, + 294, + 367, + 303 + ], + 
"type": "text", + "content": "end if" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 325, + 306, + 358, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 306, + 358, + 315 + ], + "spans": [ + { + "bbox": [ + 325, + 306, + 358, + 315 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 325, + 317, + 402, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 317, + 402, + 327 + ], + "spans": [ + { + "bbox": [ + 325, + 317, + 402, + 327 + ], + "type": "text", + "content": "for all indices " + }, + { + "bbox": [ + 325, + 317, + 402, + 327 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 325, + 317, + 402, + 327 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 340, + 330, + 438, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 330, + 438, + 341 + ], + "spans": [ + { + "bbox": [ + 340, + 330, + 438, + 341 + ], + "type": "inline_equation", + "content": "s_i = \\text{argmin}_{s \\in C_{\\text{centers}}} d_{is}" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 340, + 341, + 471, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 341, + 471, + 354 + ], + "spans": [ + { + "bbox": [ + 340, + 341, + 471, + 354 + ], + "type": "interline_equation", + "content": "C _ {\\text {e l e m e n t s}} [ s _ {i} ] = C _ {\\text {e l e m e n t s}} [ s _ {i} ] \\cup \\{i \\}", + "image_path": "6207b2470797d0bac1ce46dc1f9930993924f2d61f1e9cfaef5a5fd6155f806f.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 325, + 354, + 358, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 354, + 358, + 362 + ], + "spans": [ + { + "bbox": [ + 325, + 354, + 358, + 362 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 325, + 365, + 424, + 376 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 365, + 424, + 376 + ], + "spans": [ + { + "bbox": [ + 325, + 365, + 424, + 376 + ], + "type": "text", + "content": "Return " + }, + { + "bbox": [ + 325, + 365, + 424, + 376 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + }, + { + "bbox": [ + 325, + 365, + 424, + 376 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{elements}}" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 313, + 401, + 555, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 401, + 555, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 401, + 555, + 449 + ], + "type": "text", + "content": "Specifically, the key vectors " + }, + { + "bbox": [ + 313, + 401, + 555, + 449 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 401, + 555, + 449 + ], + "type": "text", + "content": " provide a meaningful representation of each token, tailored for dot product similarity. Therefore, we will use the key vectors in the DBDPC algorithm. 
Formally, we have:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 366, + 458, + 555, + 472 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 458, + 555, + 472 + ], + "spans": [ + { + "bbox": [ + 366, + 458, + 555, + 472 + ], + "type": "interline_equation", + "content": "C _ {\\text {c e n t e r s}}, C _ {\\text {e l e m e n t s}} = \\mathrm {D B D P C} \\left(K ^ {\\prime}\\right) \\tag {7}", + "image_path": "2582c11dad2873efdcd2901d7fd2103819051dbbc8f8385673979369d9b7138d.jpg" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "spans": [ + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "inline_equation", + "content": "K' = \\{\\mathbf{u}_i \\in K \\mid i \\in S_{\\text{important}}\\}" + }, + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "text", + "content": " is the subset of keys consisting of elements with indices in " + }, + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "inline_equation", + "content": "S_{\\text{important}}" + }, + { + "bbox": [ + 313, + 481, + 554, + 506 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": "What about unimportant tokens near cluster centers? Tokens initially deemed unimportant but close enough to cluster centers have a high probability of being mislabeled. We add these tokens to the corresponding cluster to limit information loss. 
Formally, we define a threshold based on a coefficient " + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": ", where any token " + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": ", initially excluded, is added to the cluster of the closest center " + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "inline_equation", + "content": "s \\in C_{\\text{centers}}" + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": " if its distance to the center satisfies " + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "inline_equation", + "content": "d_{is} < \\alpha \\cdot d_c" + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": ". Specifically, the new cluster elements set " + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "inline_equation", + "content": "C_{\\text{elements}}^{(s)}" + }, + { + "bbox": [ + 313, + 506, + 554, + 616 + ], + "type": "text", + "content": " is updated as follows:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 322, + 625, + 553, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 625, + 553, + 644 + ], + "spans": [ + { + "bbox": [ + 322, + 625, + 553, + 644 + ], + "type": "interline_equation", + "content": "S _ {\\text {a d d e d}} ^ {(s)} = \\left\\{i \\in S _ {\\text {u n i m p o r t a n t}} \\mid s = \\operatorname {a r g m i n} _ {s ^ {\\prime} \\in C _ {\\text {c e n t e r s}}} d _ {i s ^ {\\prime}} \\right. 
\\tag {8}", + "image_path": "7f5b665c174e3e701e2cc28e0e974693248fd20f6c2143f751273dc85bd088c1.jpg" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 435, + 643, + 509, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 435, + 643, + 509, + 655 + ], + "spans": [ + { + "bbox": [ + 435, + 643, + 509, + 655 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 435, + 643, + 509, + 655 + ], + "type": "inline_equation", + "content": "d_{is} < \\alpha \\cdot d_c\\}" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 375, + 668, + 553, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 668, + 553, + 685 + ], + "spans": [ + { + "bbox": [ + 375, + 668, + 553, + 685 + ], + "type": "interline_equation", + "content": "C _ {\\text {e l e m e n t s}} ^ {(s)} \\leftarrow C _ {\\text {e l e m e n t s}} ^ {(s)} \\cup S _ {\\text {a d d e d}} ^ {(s)} \\tag {9}", + "image_path": "1fde0b23cdce925158bf499bc98398d7386c9c58aae28864e8b31573af2b44c1.jpg" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "Merging Finally, the hidden states corresponding to the elements in each cluster are merged. 
Formally, the merged" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 38 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 137, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 137, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 137, + 83 + ], + "type": "text", + "content": "Algorithm 3 PACT" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 88, + 296, + 381 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "spans": [ + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "Input: Hidden states " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{H} = [\\mathbf{h}_1, \\dots, \\mathbf{h}_n] \\in \\mathbb{R}^{n \\times d}" + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "; key and query matrices " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{K}, \\mathbf{Q} \\in \\mathbb{R}^{n \\times n_h \\times d_h}" + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "; position IDs " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "\\mathbf{P} = [p_1, \\dots, p_n]" + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "; pruning percentage " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "\\lambda \\in [0, 1]" + }, 
+ { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "; cutoff distance " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "d_c > 0" + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "text", + "content": "; tolerance coefficient " + }, + { + "bbox": [ + 56, + 88, + 296, + 137 + ], + "type": "inline_equation", + "content": "\\alpha > 0" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "type": "text", + "content": "Output: Merged hidden states " + }, + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "type": "inline_equation", + "content": "\\mathbf{H}'" + }, + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "type": "text", + "content": "; new position IDs " + }, + { + "bbox": [ + 56, + 137, + 284, + 148 + ], + "type": "inline_equation", + "content": "\\mathbf{P}'" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 149, + 286, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 149, + 286, + 160 + ], + "spans": [ + { + "bbox": [ + 66, + 149, + 286, + 160 + ], + "type": "text", + "content": "Step 1: Identify important and unimportant tokens" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 161, + 248, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 161, + 248, + 173 + ], + "spans": [ + { + "bbox": [ + 66, + 161, + 248, + 173 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{important}}" + }, + { + "bbox": [ + 66, + 161, + 248, + 173 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{unimportant}}\\gets \\mathrm{EUTI}(\\mathbf{H},\\mathbf{K},\\mathbf{Q},p)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 173, + 266, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
66, + 173, + 266, + 183 + ], + "spans": [ + { + "bbox": [ + 66, + 173, + 266, + 183 + ], + "type": "text", + "content": "Step 2: Cluster important tokens with DBDPC" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 184, + 195, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 184, + 195, + 196 + ], + "spans": [ + { + "bbox": [ + 66, + 184, + 195, + 196 + ], + "type": "inline_equation", + "content": "\\mathbf{K}^{\\prime}\\gets \\{\\mathbf{k}_{i}\\in \\mathbf{K}\\mid i\\in S_{\\mathrm{important}}\\}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 197, + 219, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 197, + 219, + 209 + ], + "spans": [ + { + "bbox": [ + 66, + 197, + 219, + 209 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + }, + { + "bbox": [ + 66, + 197, + 219, + 209 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{elements}}\\gets \\mathrm{DBDPC}(\\mathbf{K}^{\\prime},d_{c})" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 209, + 294, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 209, + 294, + 231 + ], + "spans": [ + { + "bbox": [ + 66, + 209, + 294, + 231 + ], + "type": "text", + "content": "Step 3: Assign unimportant tokens to sufficiently close clusters." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 232, + 166, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 232, + 166, + 244 + ], + "spans": [ + { + "bbox": [ + 66, + 232, + 166, + 244 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 66, + 232, + 166, + 244 + ], + "type": "inline_equation", + "content": "i\\in S_{\\mathrm{unimportant}}" + }, + { + "bbox": [ + 66, + 232, + 166, + 244 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 82, + 246, + 156, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 246, + 156, + 256 + ], + "spans": [ + { + "bbox": [ + 82, + 246, + 156, + 256 + ], + "type": "inline_equation", + "content": "s_i\\gets argmin_s d_{is}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 257, + 160, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 257, + 160, + 268 + ], + "spans": [ + { + "bbox": [ + 82, + 257, + 160, + 268 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 82, + 257, + 160, + 268 + ], + "type": "inline_equation", + "content": "d_{isi} < \\alpha .d_c" + }, + { + "bbox": [ + 82, + 257, + 160, + 268 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 269, + 202, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 269, + 202, + 283 + ], + "spans": [ + { + "bbox": [ + 97, + 269, + 202, + 283 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{elements}}^{(s_i)} \\gets C_{\\mathrm{elements}}^{(s_i)} \\cup \\{i\\}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 284, + 107, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 284, + 107, + 293 + ], + "spans": [ + { + "bbox": [ + 82, + 284, + 107, + 293 + ], + "type": "text", + "content": "end if" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 66, + 
295, + 99, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 295, + 99, + 304 + ], + "spans": [ + { + "bbox": [ + 66, + 295, + 99, + 304 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 66, + 306, + 288, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 306, + 288, + 318 + ], + "spans": [ + { + "bbox": [ + 66, + 306, + 288, + 318 + ], + "type": "text", + "content": "Step 4: Merge hidden states and assign position IDs" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 66, + 319, + 154, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 319, + 154, + 330 + ], + "spans": [ + { + "bbox": [ + 66, + 319, + 154, + 330 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 66, + 319, + 154, + 330 + ], + "type": "inline_equation", + "content": "s\\in C_{\\mathrm{centers}}" + }, + { + "bbox": [ + 66, + 319, + 154, + 330 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 82, + 331, + 196, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 331, + 196, + 347 + ], + "spans": [ + { + "bbox": [ + 82, + 331, + 196, + 347 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_s^{\\prime}\\gets \\frac{1}{|C_{\\mathrm{elements}}^{(s)}|}\\sum_{i\\in C_{\\mathrm{elements}}^{(s)}}\\mathbf{h}_i" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 82, + 348, + 116, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 348, + 116, + 359 + ], + "spans": [ + { + "bbox": [ + 82, + 348, + 116, + 359 + ], + "type": "inline_equation", + "content": "p_s^\\prime \\gets p_s" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 66, + 360, + 99, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 360, + 99, + 369 + ], + "spans": [ + { + "bbox": [ + 66, + 360, + 99, + 369 + ], + "type": 
"text", + "content": "end for" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 66, + 370, + 128, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 370, + 128, + 381 + ], + "spans": [ + { + "bbox": [ + 66, + 370, + 128, + 381 + ], + "type": "text", + "content": "Return H', P'" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 55, + 407, + 180, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 407, + 180, + 418 + ], + "spans": [ + { + "bbox": [ + 55, + 407, + 180, + 418 + ], + "type": "text", + "content": "hidden states are computed as:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 64, + 426, + 295, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 426, + 295, + 467 + ], + "spans": [ + { + "bbox": [ + 64, + 426, + 295, + 467 + ], + "type": "interline_equation", + "content": "\\mathbf {H} ^ {\\prime} = \\left\\{\\frac {1}{| C _ {\\text {e l e m e n t s}} ^ {(j)} |} \\sum_ {i \\in C _ {\\text {e l e m e n t s}} ^ {(j)}} \\mathbf {h} _ {i} \\mid C _ {\\text {e l e m e n t s}} ^ {(j)} \\in C _ {\\text {e l e m e n t s}} \\right\\} \\tag {10}", + "image_path": "69925927f3fbdaadf1cc8095404cb35f5bc2f8688dd8a47c3953fd4d40bf1d41.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "text", + "content": "Defining the position IDs Accurately assigning position IDs to each vector in the new hidden states " + }, + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{H}^{\\prime}" + }, + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "text", + "content": " is crucial, especially for models using Rotary embeddings, as these IDs determine the input image structure or the temporal 
dependencies of the input video. In order to achieve a low statistical discrepancy compared to regular inference, we assign the position ID for each vector from " + }, + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "inline_equation", + "content": "H^{\\prime}" + }, + { + "bbox": [ + 55, + 474, + 296, + 665 + ], + "type": "text", + "content": " as its corresponding cluster center. The full PACT pipeline is shown in Algorithm 3. When Rotary Embeddings are used, DBDPC uses the keys after these embeddings are applied, whereas EUTI uses the keys and queries before applying these embeddings. For clarity, we omit this detail in Algorithm 3. We also note that both DBDPC and EUTI, as well as PACT, do not use textual tokens. Therefore, visual token reduction is performed independently of the textual context, making our method well-suited for multi-turn conversations." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 714 + ], + "type": "text", + "content": "Proportional attention Merging tokens reduces their influence in the attention mechanism and can therefore deteriorate performance if many important tokens are merged together. To mitigate this, we employ proportional attention." 
+ } + ] + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 316, + 70, + 434, + 168 + ], + "blocks": [ + { + "bbox": [ + 316, + 70, + 434, + 168 + ], + "lines": [ + { + "bbox": [ + 316, + 70, + 434, + 168 + ], + "spans": [ + { + "bbox": [ + 316, + 70, + 434, + 168 + ], + "type": "image", + "image_path": "d0d05d5ff1ed06ed3b1b8cee55b23fa7e8799dd820eae1b3dbb0f21733a09cda.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 189, + 555, + 222 + ], + "lines": [ + { + "bbox": [ + 313, + 189, + 555, + 222 + ], + "spans": [ + { + "bbox": [ + 313, + 189, + 555, + 222 + ], + "type": "text", + "content": "Figure 5. Comparison between PACT, DBDPC, and EUTI against other visual token reduction methods across various reduction ratios applied on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 436, + 71, + 553, + 168 + ], + "blocks": [ + { + "bbox": [ + 436, + 71, + 553, + 168 + ], + "lines": [ + { + "bbox": [ + 436, + 71, + 553, + 168 + ], + "spans": [ + { + "bbox": [ + 436, + 71, + 553, + 168 + ], + "type": "image", + "image_path": "dae7661cfa177d954013243da7ff518a0070487864e2f9d776b1ef938eb9a64d.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": ", and " + }, 
+ { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": " denote the keys, queries, and values at a layer " + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "L' \\geq L" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": ". For each attention head " + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 313, + 243, + 554, + 278 + ], + "type": "text", + "content": ", the attention scores are calculated as follows:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 332, + 284, + 554, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 284, + 554, + 312 + ], + "spans": [ + { + "bbox": [ + 332, + 284, + 554, + 312 + ], + "type": "interline_equation", + "content": "A ^ {(j)} = \\operatorname {s o f t m a x} \\left(\\frac {Q ^ {(j)} K ^ {(j) \\top}}{\\sqrt {d _ {l ^ {\\prime}}}} + \\log \\mathbf {W} + \\mathbf {B}\\right) \\tag {11}", + "image_path": "3b64ec81b0569105c87ff0f63a744fa67f3569c653b3a8ad27f5bdebd3079d89.jpg" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "d_{l'}" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": " is the dimensionality of the query for each attention head. 
Here, " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": " is a matrix representing the weight of each token, and " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{B}" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": " is the attention mask. Specifically, for visual tokens, " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "w_{i_0,i_1}" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": " represents the size of the cluster corresponding to token " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "i_1" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": ", for any value of " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "i_0" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": ". For each textual token at position " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "w_{i_0,t} = 1" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": ", as they remain unmerged, retaining a weight of one. By scaling the attention scores based on " + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 313, + 319, + 555, + 474 + ], + "type": "text", + "content": ", the model effectively treats each visual token as if it represents multiple tokens. 
We note that when using proportional attention, we use PyTorch's scaled dot-product attention, which produces similar results to the official FlashAttention implementation while supporting custom masks." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "text", + "content": "Selecting the layer " + }, + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "text", + "content": " for token reduction: To ensure maximum computational gain, we must choose an early layer " + }, + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 313, + 475, + 554, + 594 + ], + "type": "text", + "content": " for visual token reduction. However, we also require that the keys at the selected layer are not too similar, allowing for effective clustering and pruning. Thus, we select the earliest layer where the maximum distance between keys is sufficiently high. Figure 4 shows that in the initial layers of LLaVA-OneVision-7B, the keys corresponding to visual tokens are quite similar, indicating a lack of distinctive features necessary for effective pruning and clustering." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 605, + 395, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 395, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 395, + 617 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 313, + 624, + 429, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 624, + 429, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 624, + 429, + 635 + ], + "type": "text", + "content": "4.1. 
Evaluation datasets" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 554, + 714 + ], + "type": "text", + "content": "We evaluate the effectiveness of PACT using diverse benchmarks, similar to those used for LLaVA-OneVision-7B, covering single-image, multi-image, and video tasks. We use AI2D [22], TextVQA [45], ChartQA [37], DocVQA [38], and InfographicVQA [39] to assess PACT's ability to reduce visual tokens while maintaining performance" + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 742 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 70, + 294, + 168 + ], + "blocks": [ + { + "bbox": [ + 57, + 70, + 294, + 168 + ], + "lines": [ + { + "bbox": [ + 57, + 70, + 294, + 168 + ], + "spans": [ + { + "bbox": [ + 57, + 70, + 294, + 168 + ], + "type": "image", + "image_path": "bb81e2fa9638c44f00f88ad9a19105b302a2c6038fe67cc94c6d7ee5995711c9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 177, + 295, + 210 + ], + "lines": [ + { + "bbox": [ + 55, + 177, + 295, + 210 + ], + "spans": [ + { + "bbox": [ + 55, + 177, + 295, + 210 + ], + "type": "text", + "content": "Figure 6. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on Qwen2-VL-7B-Instruct." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 231, + 295, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 231, + 295, + 399 + ], + "spans": [ + { + "bbox": [ + 54, + 231, + 295, + 399 + ], + "type": "text", + "content": "in text-rich documents. To test reasoning across multiple disciplines, we use MME [15], MMBench [32], MMVet [51], MathVerse [57], MathVista [34], MMMU [53], MMStar [8], and ScienceQA [33]. Additionally, Vibe-Eval [40], MM-LiveBench [26], and LLaVA-Bench-Wilder [25] evaluate its robustness in real-world scenarios and visual chat contexts. We use LLaVA-Interleave Bench [25] and MuirBench [48] to examine PACT's efficiency in token reduction while preserving inter-image reasoning. To assess performance in video comprehension tasks, we use ActivityNet-QA [52], MLVU [58], VideoMME [16], EgoSchema [36], and PerceptionTest [41]. Finally, Video-ChatGPT [35] evaluates the method's effectiveness in dialogue-based video interaction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 406, + 157, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 406, + 157, + 418 + ], + "spans": [ + { + "bbox": [ + 55, + 406, + 157, + 418 + ], + "type": "text", + "content": "4.2. Evaluation setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "text", + "content": "In our comparison, we include approaches where the reduction is applied at a single layer, similar to PACT, such as FastV and clustering-based visual token reduction. 
For these approaches, we refer to the reduction ratio as the relative reduction in the number of visual tokens, defined as " + }, + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "inline_equation", + "content": "1 - \\frac{\\text{number of visual tokens after reduction}}{\\text{number of visual tokens before reduction}}" + }, + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "text", + "content": ". For all these approaches, we use the same value of " + }, + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 423, + 296, + 555 + ], + "type": "text", + "content": " and vary hyperparameters to test across different reduction ratios. For methods that use progressive token reduction, like ToME [6], or apply reduction after the visual encoder, as PruMerge and HiReD, or when the reduction ratio cannot be controlled at a fixed" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 57, + 570, + 294, + 668 + ], + "blocks": [ + { + "bbox": [ + 57, + 570, + 294, + 668 + ], + "lines": [ + { + "bbox": [ + 57, + 570, + 294, + 668 + ], + "spans": [ + { + "bbox": [ + 57, + 570, + 294, + 668 + ], + "type": "image", + "image_path": "034e296c0cda8f8c270bbdf99aafdb75408397f5567fbda05c825203f40fcaa7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "type": "text", + "content": "Figure 7. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on InternVL2-8B." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "text", + "content": "layer, such as VTW, we adjust the parameters of these approaches to achieve the same average number of visual tokens across all layers as the one-layer reduction methods for a given reduction ratio. When evaluating clustering algorithms for visual token reduction, we apply proportional attention, as it consistently improves performance across all clustering algorithms, especially at high reduction ratios. Additionally, it is crucial to correctly assign position IDs to the resulting reduced set of visual tokens. Details on the assignment strategy are presented in Appendix E. When reporting processing time or throughput, we take into account the total time required by both the language model and the reduction algorithm per input element. In the next section, we base our comparison on a metric called the Approach-to-Reference Metric Ratio, defined as the average of the ratio of the metric of the tested approach to the metric obtained without visual token reduction across all test datasets. Formally we have Approach-to-Reference Metric Ratio " + }, + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "inline_equation", + "content": "= \\frac{1}{N} \\sum_{i=1}^{N} \\frac{\\text{Metric with reduction}(i)}{\\text{Metric no reduction}(i)}" + }, + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 72, + 555, + 457 + ], + "type": "text", + "content": " is the total number of test datasets. This metric indicates how much of the original model capacity is retained. 
It is important to note that when using ToME for visual token reduction, a reduction ratio greater than 50% can't be achieved if the number of visual tokens is reduced by a fixed amount in each layer, as suggested in [6]. Instead, we use a scheduler to achieve higher reduction ratios, which we explain in Appendix F. More details on the hyperparameters used for evaluating PACT are provided in Appendix G. We follow the same dataset splits and metrics used for evaluating LLaVA-OneVision wherever feasible. More details are provided in Appendix H. Note that all experiments were conducted on a single A100 GPU." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 464, + 372, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 464, + 372, + 475 + ], + "spans": [ + { + "bbox": [ + 314, + 464, + 372, + 475 + ], + "type": "text", + "content": "4.3. Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 480, + 554, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 480, + 554, + 554 + ], + "spans": [ + { + "bbox": [ + 313, + 480, + 554, + 554 + ], + "type": "text", + "content": "We compare PACT with FastV [9], VTW [30], ToME [6], PruMerge [44] and HiRED [3] on LLaVA-OneVision7B, InternVL2-8B, Qwen2-VL-7B-Instruct and LLaVA1.6-Mistral-7B. Since HiRED and PruMerge are only applicable to LLaVA-1.6, we exclude them from other comparisons. 
As shown in figures 5, 6, 7, and 8 PACT con" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 316, + 570, + 553, + 669 + ], + "blocks": [ + { + "bbox": [ + 316, + 570, + 553, + 669 + ], + "lines": [ + { + "bbox": [ + 316, + 570, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 316, + 570, + 553, + 669 + ], + "type": "image", + "image_path": "935d857f27e28290132519c8ef931d40fe2dec2f6c3021d2718a77c20d124ce9.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 677, + 555, + 709 + ], + "lines": [ + { + "bbox": [ + 313, + 677, + 555, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 555, + 709 + ], + "type": "text", + "content": "Figure 8. Comparison between PACT and other visual token reduction methods across various reduction ratios applied on LLaVA-1.6-Mistral-7B." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 123, + 555, + 325 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 114 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 114 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 114 + ], + "type": "text", + "content": "Table 2. Comparison of PACT with FastV, VTW, and ToME on LLaVA-OneVision-7B. Algo. Time refers to the average time the algorithm takes per input element, measured in seconds. Proc. Time refers to the average time taken by both the language model and the reduction algorithm per input element. Red. Ratio stands for average Reduction Ratio. The Algo. 
Time for VTW is nearly zero, and thus omitted. The different visual token reduction methods are evaluated at the same reduction ratio as PACT." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 56, + 123, + 555, + 325 + ], + "lines": [ + { + "bbox": [ + 56, + 123, + 555, + 325 + ], + "spans": [ + { + "bbox": [ + 56, + 123, + 555, + 325 + ], + "type": "table", + "html": "
DatasetNo reductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeMetricProc. TimeAlgo. Time
VideoMME58.50.79257.665.6%0.3690.02157.00.3710.04046.90.29657.00.4170.091
MME15790.5541564.070.2%0.2430.0171576.00.2440.016842.00.2311556.90.3170.084
DocVQA87.21.08884.467.9%0.5190.02684.30.5240.05110.50.44961.90.5760.099
MLVU65.20.79564.766.4%0.3610.02262.90.3690.04054.40.31263.40.4170.092
LLaVA-Interleave64.10.24964.069.7%0.1330.01058.90.1390.00732.40.12350.30.1920.068
ChartQA79.90.67176.568.5%0.3410.01977.00.3420.01616.60.30763.40.4020.082
MMBench80.60.24980.369.3%0.1350.01079.00.1400.00552.40.12579.70.1930.066
MuirBench42.00.38443.167.8%0.1780.01340.40.1780.00934.90.16240.50.2330.072
ScienceQA95.90.23893.869.6%0.1330.01091.60.1370.00680.00.12493.80.1900.066
MMMU49.20.13948.970.4%0.1040.00748.90.1060.00343.50.09348.60.1240.062
AI2D81.50.38281.069.8%0.1860.01379.40.1910.01469.70.17779.70.2440.073
InfographicVQA66.00.89561.964.7%0.4810.02358.60.4830.04024.50.40848.30.6070.130
MMStar62.00.29760.169.7%0.1470.01158.60.1520.00737.20.16560.10.2290.069
ActivityNetQA54.50.92155.170.0%0.4190.02953.70.4250.04236.60.39454.10.5130.203
MM-LiveBench73.14.43471.767.5%3.2120.04764.43.2210.04441.03.08064.23.6070.102
LLaVA-Wilder71.010.1071.570.0%8.2620.03571.08.2630.02548.87.51568.07.9260.085
MathVerse16.80.83116.674.2%0.3610.02116.10.3820.03617.60.30116.50.5590.150
MathVista63.30.44062.070.7%0.2710.01559.50.2720.01638.50.26055.00.3380.071
MMVet58.04.60258.470.4%3.7930.03551.73.7950.03615.73.65247.24.1150.212
Vibe-Eval41.65.15339.171.1%3.7090.03238.23.7140.04712.33.55031.24.3170.095
VideoChatGPT3.252.9723.2567.2%1.8630.0293.221.8660.0401.921.3203.191.9750.205
EgoSchema60.10.81160.166.6%0.3510.02158.70.3530.04444.80.29759.80.3910.091
PerceptionTest52.10.80152.366.9%0.3530.02351.70.3570.04045.00.29651.10.3930.090
TextVQA75.80.69075.067.2%0.3320.02375.50.3360.02911.60.28762.50.3920.087
", + "image_path": "6781bd4dacc8d2c302580b8fb7ca94d0f5aaeb7217ecb00a2d70b937c7b1cd3b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 54, + 344, + 294, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 344, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 54, + 344, + 294, + 536 + ], + "type": "text", + "content": "siently outperforms other methods at both equal reduction ratios and equal throughput across all four models. VTW experiences a significant performance drop for reduction ratios above " + }, + { + "bbox": [ + 54, + 344, + 294, + 536 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 54, + 344, + 294, + 536 + ], + "type": "text", + "content": ", indicating that removing all visual tokens is only effective when done in later layers. FastV and ToME struggle at high reduction ratios, while PruMerge and HiRED exhibit degradation even at low reduction ratios. Meanwhile, PACT maintains acceptable performance even at high reduction ratios. Table 2 and Table 3 shows that PACT outperforms other approaches on most of the test datasets when applied on LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct. The same conclusion applies to other models, with detailed results provided in Appendix I. In Tab. 
1, we report the reduction ratio, throughput, and maximum GPU memory consumption of the different approaches at an equal Approach-to-Reference Metric Ra" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 559, + 175, + 654 + ], + "blocks": [ + { + "bbox": [ + 58, + 559, + 175, + 654 + ], + "lines": [ + { + "bbox": [ + 58, + 559, + 175, + 654 + ], + "spans": [ + { + "bbox": [ + 58, + 559, + 175, + 654 + ], + "type": "image", + "image_path": "bbee2722cb3ddcd6e6492ed26fea8d5f65ff30ed2c01704da099312ba5eed042.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 709 + ], + "type": "text", + "content": "Figure 9. Comparison of DBDPC and other clustering algorithms for visual token reduction at different reduction ratios on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 177, + 559, + 294, + 654 + ], + "blocks": [ + { + "bbox": [ + 177, + 559, + 294, + 654 + ], + "lines": [ + { + "bbox": [ + 177, + 559, + 294, + 654 + ], + "spans": [ + { + "bbox": [ + 177, + 559, + 294, + 654 + ], + "type": "image", + "image_path": "5d0471b07592f17764f244fb83f43851f0af23eb77d5e45eb7c28eaf67db9494.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "content": "tio of " + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "inline_equation", + "content": "98.6\\%" + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B. 
PACT significantly outperforms the other methods, achieving a reduction ratio of " + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "inline_equation", + "content": "71.3\\%" + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "content": ", a GPU memory reduction of " + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "inline_equation", + "content": "31\\%" + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "content": ", and a " + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "inline_equation", + "content": "225\\%" + }, + { + "bbox": [ + 313, + 344, + 555, + 632 + ], + "type": "text", + "content": " speedup in the language model's inference time. The per-dataset results used to compute these metrics are shown in Tab. 5. Tab. 1 also indicates that when using FastV, the maximum GPU memory consumption is relatively high due to the costly computation of attention scores. We further compare DBDPC against agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14], with results presented in Fig. 9. The graphs reveal that DBDPC consistently outperforms other clustering algorithms for visual token reduction, exhibiting less performance degradation at equal reduction ratios and demonstrating improved computational efficiency, leading to better throughput. These results validate our hypothesis that, for an effective visual token reduction, it is necessary to ensure that the distances between elements within each cluster do not exceed a predefined threshold. Fig. 5 also shows that EUTI consistently outperforms FastV at equal reduction ratios and is less costly, as it does not require the computation of attention scores. In addition, unlike FastV, EUTI does not introduce a GPU memory overhead1. We provide additional numerical results in Appendix I." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 640, + 405, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 640, + 405, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 640, + 405, + 653 + ], + "type": "text", + "content": "4.4. Ablation study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 658, + 555, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 555, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 555, + 694 + ], + "type": "text", + "content": "Fig. 5 shows that PACT consistently outperforms both DBDPC and EUTI across various reduction ratios. This confirms that combining clustering and pruning techniques" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 325, + 702, + 528, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 702, + 528, + 712 + ], + "spans": [ + { + "bbox": [ + 325, + 702, + 528, + 712 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 325, + 702, + 528, + 712 + ], + "type": "text", + "content": "EUTI achieves roughly the same memory reduction as PACT." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 91, + 555, + 281 + ], + "blocks": [ + { + "bbox": [ + 75, + 70, + 535, + 83 + ], + "lines": [ + { + "bbox": [ + 75, + 70, + 535, + 83 + ], + "spans": [ + { + "bbox": [ + 75, + 70, + 535, + 83 + ], + "type": "text", + "content": "Table 3. Comparison of PACT with FastV, VTW, and ToME applied on Qwen2-VL-7B-Instruct across Various Datasets." 
+ } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 91, + 555, + 281 + ], + "lines": [ + { + "bbox": [ + 58, + 91, + 555, + 281 + ], + "spans": [ + { + "bbox": [ + 58, + 91, + 555, + 281 + ], + "type": "table", + "html": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1654.50.2381666.586.3%0.1101500.00.111709.240.1201610.90.140
DocVQA93.90.51690.577.5%0.29486.60.2988.50.24942.90.350
TextVQA81.80.15580.467.5%0.13279.90.13513.20.11866.20.151
InfographicVQA74.60.47870.669.7%0.27863.30.27321.50.22543.90.299
ChartQA80.80.14576.061.1%0.13569.20.13412.90.12355.10.155
MMBench77.60.07477.151.5%0.07777.10.07476.90.07375.90.080
MuirBench40.70.15941.276.9%0.11340.40.11237.90.11175.80.125
MMMU51.40.10951.272.6%0.09349.30.09245.40.08848.90.105
AI2D79.90.10578.464.2%0.09676.20.09769.00.08776.40.115
MMStar56.00.07254.861.3%0.07251.50.06740.30.06553.80.077
EgoSchema62.10.36061.660.0%0.20760.20.21246.30.19061.20.230
MathVerse25.30.62024.582.2%0.39323.70.39613.90.29618.10.651
MathVista59.20.24957.773.3%0.19556.40.19436.80.16553.50.275
MM Vet24.94.70025.180.3%3.82022.33.8302.73.65016.74.780
Vibe-Eval47.53.20046.185.0%2.31044.32.37513.11.99329.63.620
LLaVA-Interleave35.90.12035.573.7%0.10034.70.10133.20.09635.30.125
MM-LiveBench72.63.97070.777.1%3.04063.03.12039.72.97057.64.450
", + "image_path": "ac03a20585c83df2d07c8acc08addbb8f9c595884eeff0f8891abb84395e3e2b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 300, + 297, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 300, + 297, + 552 + ], + "spans": [ + { + "bbox": [ + 55, + 300, + 297, + 552 + ], + "type": "text", + "content": "yields better performance than using each approach independently, as the combined method addresses both visual tokens irrelevance and redundancy. We ablate several components of the DBDPC algorithm and present the results in Fig. 10. First, we ablate token merging by selecting the center of each cluster as the representative token instead of merging tokens within each cluster. We also ablate the use of proportional attention. Additionally, we ablate the assignment of position IDs to the reduced set of tokens and experiment with two alternatives: using the mean of position IDs of all elements in each cluster and assigning position IDs sequentially after reordering the reduced set according to the mean of position IDs. Finally, we ablate the use of key vectors in the clustering process and instead use hidden states. Our results show that each ablated component contributes positively to the performance of the DBDPC algorithm. Notably, correctly assigning position IDs to the reduced set is crucial, as these position IDs reflect the structure of input images and the temporal order of input videos. 
Additionally, proportional attention proves effective at higher reduction ratios, while token merging en" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 58, + 581, + 175, + 679 + ], + "blocks": [ + { + "bbox": [ + 58, + 581, + 175, + 679 + ], + "lines": [ + { + "bbox": [ + 58, + 581, + 175, + 679 + ], + "spans": [ + { + "bbox": [ + 58, + 581, + 175, + 679 + ], + "type": "image", + "image_path": "f368bf2a470b7eb965c3a82944f7d45511b9292e73f2461e19a596acc4fe5f82.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 687, + 295, + 709 + ], + "lines": [ + { + "bbox": [ + 55, + 687, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 55, + 687, + 295, + 709 + ], + "type": "text", + "content": "Figure 10. Ablation study of DBDPC and EUTI on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 177, + 582, + 294, + 678 + ], + "blocks": [ + { + "bbox": [ + 177, + 582, + 294, + 678 + ], + "lines": [ + { + "bbox": [ + 177, + 582, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 177, + 582, + 294, + 678 + ], + "type": "image", + "image_path": "991768b7b1eccb7cc3fe0e64629c7ae67379f84e5f598eb26031e9d494ff3b94.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "spans": [ + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "text", + "content": "hances performance once the reduction ratio exceeds " + }, + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "text", + "content": ". 
The figure also confirms that keys are better suited for cosine similarity-based distance calculations, as they are naturally used in dot products within the attention mechanism. We perform two separate ablations on Eq. (2) of the EUTI algorithm. The first ablation removes the use of hidden state norms, while the second ablates the use of the global query, which corresponds to using only the hidden state norms. The results in Fig. 10 show that combining both the global query-based score and the norm of hidden states consistently leads to better results than using either metric alone, suggesting that they provide complementary information about the importance of each visual token. Finally, we ablate the pruned token recovery module in PACT by setting " + }, + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 300, + 556, + 587 + ], + "type": "text", + "content": " to zero, with results presented in Fig. 11. The plot shows that reintegrating visual tokens initially deemed unimportant but close enough to a cluster center consistently enhances performance across different reduction ratios, supporting our hypothesis that these tokens were likely mislabeled by the EUTI module. Figure 11 also shows the effect of the choice of the reduction layer on PACT's performance, demonstrating the effectiveness of our reduction layer identification approach. We provide additional numerical results in Appendix J." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 316, + 602, + 424, + 689 + ], + "blocks": [ + { + "bbox": [ + 316, + 602, + 424, + 689 + ], + "lines": [ + { + "bbox": [ + 316, + 602, + 424, + 689 + ], + "spans": [ + { + "bbox": [ + 316, + 602, + 424, + 689 + ], + "type": "image", + "image_path": "a13ddcbf4f57cce0737e9972df5ccd4e3a0e4c3b45929d11bb24821088eda9ec.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 699, + 553, + 711 + ], + "lines": [ + { + "bbox": [ + 316, + 699, + 553, + 711 + ], + "spans": [ + { + "bbox": [ + 316, + 699, + 553, + 711 + ], + "type": "text", + "content": "Figure 11. Ablation study of PACT on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 426, + 602, + 553, + 689 + ], + "blocks": [ + { + "bbox": [ + 426, + 602, + 553, + 689 + ], + "lines": [ + { + "bbox": [ + 426, + 602, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 426, + 602, + 553, + 689 + ], + "type": "image", + "image_path": "1ead4ca5498c7b123994ccf382a10967b041bc2ad7b30217535496e774d44fd7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 741 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 128, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 128, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 128, + 83 + ], + "type": "text", + "content": "5. 
Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 91, + 296, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 246 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 246 + ], + "type": "text", + "content": "In this work, we presented PACT, a method that addresses both visual token irrelevance and redundancy. PACT is a plug-and-play solution that does not require additional training. It does not rely on textual tokens for visual token reduction, making it well-suited for multi-turn conversations. Additionally, it operates independently of the visual encoder and connector architecture, making it broadly applicable across various Visual Language Models. Our results confirm that the number of visual tokens in Visual Language Models is unnecessarily large and provide valuable insights for effective token reduction. This opens the door for future work in designing more efficient connectors and architectures for VLMs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 258, + 164, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 258, + 164, + 272 + ], + "spans": [ + { + "bbox": [ + 55, + 258, + 164, + 272 + ], + "type": "text", + "content": "6. Acknowledgments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 277, + 296, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 296, + 344 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 296, + 344 + ], + "type": "text", + "content": "This work received financial support from Crédit Agricole S.A. through the research chair with Ecole Polytechnique on Trustworthy and Responsible AI. This work was granted access to the HPC resources of IDRIS under the allocation 2024-AD011014793R1 made by GENCI." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 356, + 115, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 356, + 115, + 369 + ], + "spans": [ + { + "bbox": [ + 56, + 356, + 115, + 369 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 61, + 375, + 296, + 714 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 61, + 375, + 295, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 375, + 295, + 408 + ], + "spans": [ + { + "bbox": [ + 61, + 375, + 295, + 408 + ], + "type": "text", + "content": "[1] Marcel R Ackermann, Johannes Blömer, Daniel Kuntze, and Christian Sohler. Analysis of agglomerative clustering. *Algorithmica*, 69:184-215, 2014. 8, 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 61, + 410, + 296, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 410, + 296, + 453 + ], + "spans": [ + { + "bbox": [ + 61, + 410, + 296, + 453 + ], + "type": "text", + "content": "[2] Mohiuddin Ahmed, Raihan Seraj, and Syed Mohammed Shamsul Islam. The k-means algorithm: A comprehensive survey and performance evaluation. Electronics, 9(8):1295, 2020. 5, 8, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 61, + 455, + 296, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 455, + 296, + 520 + ], + "spans": [ + { + "bbox": [ + 61, + 455, + 296, + 520 + ], + "type": "text", + "content": "[3] Kazi Hasan Ibn Arif, JinYi Yoon, Dimitrios S Nikolopoulos, Hans Vandierendonck, Deepu John, and Bo Ji. Hired: Attention-guided token dropping for efficient inference of high-resolution vision-language models in resource-constrained environments. arXiv preprint arXiv:2408.10945, 2024. 
2, 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 522, + 295, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 522, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 62, + 522, + 295, + 566 + ], + "type": "text", + "content": "[4] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A frontier large vision-language model with versatile abilities. arXiv preprint arXiv:2308.12966, 2023. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 567, + 295, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 567, + 295, + 600 + ], + "spans": [ + { + "bbox": [ + 62, + 567, + 295, + 600 + ], + "type": "text", + "content": "[5] Panthadeep Bhattacharjee and Pinaki Mitra. A survey of density based clustering algorithms. Frontiers of Computer Science, 15:1-27, 2021. 5, 8, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 62, + 601, + 296, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 601, + 296, + 645 + ], + "spans": [ + { + "bbox": [ + 62, + 601, + 296, + 645 + ], + "type": "text", + "content": "[6] Daniel Bolya, Cheng-Yang Fu, Xiaoliang Dai, Peizhao Zhang, Christoph Feichtenhofer, and Judy Hoffman. Token merging: Your vit but faster. arXiv preprint arXiv:2210.09461, 2022. 2, 7, 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 62, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 62, + 647, + 295, + 689 + ], + "type": "text", + "content": "[7] Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023. 
2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 62, + 691, + 295, + 714 + ], + "type": "text", + "content": "[8] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao," + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 555, + 105 + ], + "type": "text", + "content": "Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024. 7" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 107, + 555, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 107, + 555, + 162 + ], + "spans": [ + { + "bbox": [ + 320, + 107, + 555, + 162 + ], + "type": "text", + "content": "[9] Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. arXiv preprint arXiv:2403.06764, 2024. 2, 3, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 163, + 554, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 163, + 554, + 217 + ], + "spans": [ + { + "bbox": [ + 316, + 163, + 554, + 217 + ], + "type": "text", + "content": "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhang-wei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. 
arXiv preprint arXiv:2404.16821, 2024. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 220, + 554, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 220, + 554, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 220, + 554, + 274 + ], + "type": "text", + "content": "[11] Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv preprint arXiv:2311.07919, 2023. 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 276, + 554, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 554, + 308 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 554, + 308 + ], + "type": "text", + "content": "[12] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. Flashattention: Fast and memory-efficient exact attention with io-awareness, 2022. 1, 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 310, + 554, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 310, + 554, + 364 + ], + "spans": [ + { + "bbox": [ + 316, + 310, + 554, + 364 + ], + "type": "text", + "content": "[13] Mohamed Dhouib, Ghassen Bettaieb, and Aymen Shabou. Docparser: End-to-endOCR-free information extraction from visually rich documents. In International Conference on Document Analysis and Recognition, pages 155-172. Springer, 2023. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 366, + 554, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 366, + 554, + 409 + ], + "spans": [ + { + "bbox": [ + 316, + 366, + 554, + 409 + ], + "type": "text", + "content": "[14] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 
5, 8, 3" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 411, + 554, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 411, + 554, + 464 + ], + "spans": [ + { + "bbox": [ + 316, + 411, + 554, + 464 + ], + "type": "text", + "content": "[15] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 468, + 554, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 468, + 554, + 521 + ], + "spans": [ + { + "bbox": [ + 317, + 468, + 554, + 521 + ], + "type": "text", + "content": "[16] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "spans": [ + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "type": "text", + "content": "[17] Tao Gong, Chengqi Lyu, Shilong Zhang, Yudong Wang, Miao Zheng, Qian Zhao, Kuikun Liu, Wenwei Zhang, Ping Luo, and Kai Chen. Multimodal-gpt: A vision and language model for dialogue with humans. arXiv preprint arXiv:2305.04790, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 579, + 554, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 579, + 554, + 623 + ], + "spans": [ + { + "bbox": [ + 317, + 579, + 554, + 623 + ], + "type": "text", + "content": "[18] Jiaming Han, Kaixiong Gong, Yiyuan Zhang, Jiaqi Wang, Kaipeng Zhang, Dahua Lin, Yu Qiao, Peng Gao, and Xiangyu Yue. 
Onellm: One framework to align all modalities with language. arXiv preprint arXiv:2312.03700, 2023. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 624, + 554, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 624, + 554, + 678 + ], + "spans": [ + { + "bbox": [ + 317, + 624, + 554, + 678 + ], + "type": "text", + "content": "[19] Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Qiang Liu, et al. Language is not all you need: Aligning perception with language models. arXiv preprint arXiv:2302.14045, 2023. 1" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 681, + 554, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 681, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 681, + 554, + 712 + ], + "type": "text", + "content": "[20] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 712 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 127 + ], + "type": "text", + "content": "[21] Yang Jin, Kun Xu, Liwei Chen, Chao Liao, Jianchao Tan, Bin Chen, Chenyi Lei, An Liu, Chengru Song, Xiaogiang Lei, et al. 
Unified language-vision pretraining with dynamic discrete visual tokenization. arXiv preprint arXiv:2309.04669, 2023. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 173 + ], + "type": "text", + "content": "[22] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In European conference on computer vision, pages 235-251. Springer, 2016. 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 175, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 239 + ], + "type": "text", + "content": "[23] Geewook Kim, Teakgyu Hong, Moonbin Yim, JeongYeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, and Seunghyun Park. Ocr-free document understanding transformer. In Computer Vision – ECCV 2022, pages 498–517, Cham, 2022. Springer Nature Switzerland. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 240, + 294, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 240, + 294, + 283 + ], + "spans": [ + { + "bbox": [ + 56, + 240, + 294, + 283 + ], + "type": "text", + "content": "[24] Bohao Li, Rui Wang, Guangzhi Wang, Yuying Ge, Yixiao Ge, and Ying Shan. Seed-bench: Benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125, 2023. 
4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 286, + 294, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 286, + 294, + 329 + ], + "spans": [ + { + "bbox": [ + 56, + 286, + 294, + 329 + ], + "type": "text", + "content": "[25] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, 2024. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 294, + 374 + ], + "type": "text", + "content": "[26] Bo Li, Peiyuan Zhang, Kaichen Zhang, Fanyi Pu, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Accelerating the development of large multimodal models, 2024. 7, 4, 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 376, + 294, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 376, + 294, + 420 + ], + "spans": [ + { + "bbox": [ + 56, + 376, + 294, + 420 + ], + "type": "text", + "content": "[27] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 2, 4, 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 422, + 294, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 422, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 56, + 422, + 294, + 464 + ], + "type": "text", + "content": "[28] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597, 2023. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 466, + 294, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 466, + 294, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 466, + 294, + 510 + ], + "type": "text", + "content": "[29] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 512, + 294, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 512, + 294, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 512, + 294, + 555 + ], + "type": "text", + "content": "[30] Zhihang Lin, Mingbao Lin, Luxi Lin, and Rongrong Ji. Boosting multimodal large language models with visual tokens withdrawal for rapid inference. arXiv preprint arXiv:2405.05803, 2024. 2, 7" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "spans": [ + { + "bbox": [ + 56, + 556, + 294, + 588 + ], + "type": "text", + "content": "[31] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv preprint arXiv:2304.08485, 2023. 1, 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "spans": [ + { + "bbox": [ + 56, + 590, + 294, + 623 + ], + "type": "text", + "content": "[32] Yuan Liu, Haodong Duan, Yuanhan Zhang, Songyang Zhang Bo Li, and Wangbo Zhao. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281, 2023. 
7" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 624, + 294, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 668 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 668 + ], + "type": "text", + "content": "[33] Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering, 2022. 7" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 670, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 670, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 670, + 294, + 712 + ], + "type": "text", + "content": "[34] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In In" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "spans": [ + { + "bbox": [ + 335, + 73, + 553, + 94 + ], + "type": "text", + "content": "ternational Conference on Learning Representations (ICLR), 2024. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 96, + 553, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 96, + 553, + 151 + ], + "spans": [ + { + "bbox": [ + 316, + 96, + 553, + 151 + ], + "type": "text", + "content": "[35] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024), 2024. 7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 152, + 553, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 196 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 196 + ], + "type": "text", + "content": "[36] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36:46212-46244, 2023. 7" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 198, + 553, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 241 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 241 + ], + "type": "text", + "content": "[37] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 243, + 555, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 555, + 286 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 555, + 286 + ], + "type": "text", + "content": "[38] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 288, + 553, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 288, + 553, + 332 + ], + "spans": [ + { + "bbox": [ + 316, + 288, + 553, + 332 + ], + "type": "text", + "content": "[39] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. 
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 333, + 553, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 333, + 553, + 420 + ], + "spans": [ + { + "bbox": [ + 316, + 333, + 553, + 420 + ], + "type": "text", + "content": "[40] Piotr Padlewski, Max Bain, Matthew Henderson, Zhongkai Zhu, Nishant Relan, Hai Pham, Donovan Ong, Kaloyan Aleksiev, Aitor Ormazabal, Samuel Phua, Ethan Yeo, Eugenie Lamprecht, Qi Liu, Yuqi Wang, Eric Chen, Deyu Fu, Lei Li, Che Zheng, Cyprien de Masson d'Autume, Dani Yogatama, Mikel Artetxe, and Yi Tay. Vibe-eval: A hard evaluation suite for measuring progress of multimodal language models. arXiv preprint arXiv:2405.02287, 2024. 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 422, + 553, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 422, + 553, + 487 + ], + "spans": [ + { + "bbox": [ + 316, + 422, + 553, + 487 + ], + "type": "text", + "content": "[41] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36, 2024. 7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 488, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 488, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 488, + 553, + 544 + ], + "type": "text", + "content": "[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 
2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 545, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 545, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 545, + 553, + 567 + ], + "type": "text", + "content": "[43] Erich Schubert. A Triangle Inequality for Cosine Similarity, page 32-44. Springer International Publishing, 2021. 1" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 568, + 553, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 568, + 553, + 611 + ], + "spans": [ + { + "bbox": [ + 316, + 568, + 553, + 611 + ], + "type": "text", + "content": "[44] Yuzhang Shang, Mu Cai, Bingxin Xu, Yong Jae Lee, and Yan Yan. Llava-prumerge: Adaptive token reduction for efficient large multimodal models. arXiv preprint arXiv:2403.15388, 2024. 2, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 613, + 553, + 668 + ], + "type": "text", + "content": "[45] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 712 + ], + "type": "text", + "content": "[46] Dingjie Song, Wenjun Wang, Shunian Chen, Xidong Wang, Michael Guan, and Benyou Wang. Less is more: A simple yet effective token reduction method for efficient multimodal llms. arXiv preprint arXiv:2409.10994, 2024. 
2" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 696 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 115 + ], + "type": "text", + "content": "[47] Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024. 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 172 + ], + "type": "text", + "content": "[48] Fei Wang, Xingyu Fu, James Y Huang, Zekun Li, Qin Liu, Xiaogeng Liu, Mingyu Derek Ma, Nan Xu, Wenxuan Zhou, Kai Zhang, et al. Muirbench: A comprehensive benchmark for robust multi-image understanding. arXiv preprint arXiv:2406.09411, 2024. 
7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 174, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 294, + 239 + ], + "type": "text", + "content": "[49] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 240, + 294, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 240, + 294, + 294 + ], + "spans": [ + { + "bbox": [ + 56, + 240, + 294, + 294 + ], + "type": "text", + "content": "[50] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 297, + 294, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 297, + 294, + 339 + ], + "spans": [ + { + "bbox": [ + 56, + 297, + 294, + 339 + ], + "type": "text", + "content": "[51] Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. Mm-vet: Evaluating large multimodal models for integrated capabilities, 2023. 7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 341, + 294, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 341, + 294, + 384 + ], + "spans": [ + { + "bbox": [ + 56, + 341, + 294, + 384 + ], + "type": "text", + "content": "[52] Zhou Yu, Dejing Xu, Jun Yu, Ting Yu, Zhou Zhao, Yueting Zhuang, and Dacheng Tao. 
Activitynet-qa: A dataset for understanding complex web videos via question answering. In AAAI, pages 9127–9134, 2019. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 386, + 294, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 386, + 294, + 473 + ], + "spans": [ + { + "bbox": [ + 56, + 386, + 294, + 473 + ], + "type": "text", + "content": "[53] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, Cong Wei, Botao Yu, Ruibin Yuan, Ren-liang Sun, Ming Yin, Boyuan Zheng, Zhenzhu Yang, Yibo Liu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of CVPR, 2024. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 475, + 294, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 475, + 294, + 506 + ], + "spans": [ + { + "bbox": [ + 56, + 475, + 294, + 506 + ], + "type": "text", + "content": "[54] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 509, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 509, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 56, + 509, + 294, + 552 + ], + "type": "text", + "content": "[55] Duzhen Zhang, Yahan Yu, Chenxing Li, Jiahua Dong, Dan Su, Chenhui Chu, and Dong Yu. Mm-llms: Recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601, 2024. 1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 554, + 294, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 554, + 294, + 586 + ], + "spans": [ + { + "bbox": [ + 56, + 554, + 294, + 586 + ], + "type": "text", + "content": "[56] Hang Zhang, Xin Li, and Lidong Bing. 
Video-llama: An instruction-tuned audio-visual language model for video understanding. arXiv preprint arXiv:2306.02858, 2023. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 587, + 294, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 587, + 294, + 641 + ], + "spans": [ + { + "bbox": [ + 56, + 587, + 294, + 641 + ], + "type": "text", + "content": "[57] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, and Hongsheng Li. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems?, 2024. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 643, + 294, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 643, + 294, + 696 + ], + "spans": [ + { + "bbox": [ + 56, + 643, + 294, + 696 + ], + "type": "text", + "content": "[58] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 
7" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 741 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 73, + 79, + 541, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 79, + 541, + 107 + ], + "spans": [ + { + "bbox": [ + 73, + 79, + 541, + 107 + ], + "type": "text", + "content": "PACT: Pruning and Clustering-Based Token Reduction for Faster Visual Language Models Supplementary Materials" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 122, + 288, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 122, + 288, + 136 + ], + "spans": [ + { + "bbox": [ + 55, + 122, + 288, + 136 + ], + "type": "text", + "content": "A. On the density peaks clustering algorithm" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": "Density Peak Clustering (DPC) is a clustering algorithm that identifies cluster centers based on local density and the distance to points with higher density, denoted as " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": ". 
The density, " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": ", can be measured by counting the number of points within a cutoff distance " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{i}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": ", or by using a Gaussian function where nearby points contribute more to the density, " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\rho_{i} = \\sum_{j}\\exp \\left(-\\left(\\frac{d_{ij}}{d_c}\\right)^2\\right)" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": ". Points with high " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": " values are selected as cluster centers. 
This selection can be done by defining a threshold " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": " and designating points as cluster centers where " + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "inline_equation", + "content": "\\rho_{i}\\cdot \\delta_{i}\\geq t\\times \\max (\\rho_{i}\\cdot \\delta_{i})" + }, + { + "bbox": [ + 55, + 143, + 296, + 357 + ], + "type": "text", + "content": ", or by selecting a fixed percentage. Other points are then assigned to the cluster of the nearest higher-density point, iterating from the highest to the lowest density. This process can create clusters of varying shapes, where the maximum distance between elements within a cluster can be extremely large. In extreme cases, the two farthest points in the input data can end up in the same cluster." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 369, + 195, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 195, + 382 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 195, + 382 + ], + "type": "text", + "content": "B. 
DBDPC Characteristics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "spans": [ + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "text", + "content": "This section aims to prove that DBDPC guarantees that: Each element's distance to its assigned cluster center is at most " + }, + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "text", + "content": " and that all cluster centers are at least " + }, + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 390, + 296, + 426 + ], + "type": "text", + "content": " apart." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 426, + 295, + 449 + ], + "type": "text", + "content": "Assume, for contradiction, that at least one of the following statements is false:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 450, + 295, + 512 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "text", + "content": "1. 
There exists an element " + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "text", + "content": " assigned to a cluster such that its distance to the cluster center is greater than " + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "inline_equation", + "content": "d_{is} > d_{c}" + }, + { + "bbox": [ + 56, + 450, + 295, + 486 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "spans": [ + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "text", + "content": "2. There exist two cluster centers " + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "inline_equation", + "content": "s_1, s_2" + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "text", + "content": " such that their pairwise distance is at most " + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "inline_equation", + "content": "d_{s_1s_2} \\leq d_c" + }, + { + "bbox": [ + 55, + 487, + 295, + 512 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 527, + 295, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 527, + 295, + 551 + ], + "spans": [ + { + "bbox": [ + 55, + 527, + 295, + 551 + ], + "type": "text", + "content": "Contradiction for Assumption 1 In DBDPC, each element " + }, + { + "bbox": [ + 55, + 527, + 295, + 551 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 527, + 295, + 551 + ], + "type": "text", + "content": " is assigned to its closest cluster center:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 562, + 219, + 580 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 562, + 219, + 580 + ], + "spans": [ + { + "bbox": [ + 132, + 562, + 219, + 580 + ], + "type": "interline_equation", + "content": "s_{i} = \\arg \\min_{s\\in C_{\\text{centers}}}d_{is}.", + "image_path": "bda2d3a72a62cb29151eb7947ad887341e88ce3a0c5f7810b01f2d4cc88cd2e6.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "d_{is} > d_c" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": " for a given center " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": ", then we have " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "d_{is'} > d_c" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": " for all centers. 
However, in the DBDPC selection process, an element is assigned as a cluster center if its minimum distance to already selected centers is over " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": ". Thus, " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": " should have been selected as a new cluster center, and its distance to the closest cluster center would be zero, which leads to a contradiction, proving that every element satisfies " + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "inline_equation", + "content": "d_{is} \\leq d_c" + }, + { + "bbox": [ + 55, + 589, + 296, + 673 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": "Contradiction for Assumption 2 Assume, without loss of generality, that " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": " is chosen after " + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "inline_equation", + "content": "s_1" + }, + { + "bbox": [ + 55, + 689, + 296, + 714 + ], + "type": "text", + "content": ". 
By the center selec" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 124, + 503, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 124, + 503, + 136 + ], + "spans": [ + { + "bbox": [ + 313, + 124, + 503, + 136 + ], + "type": "text", + "content": "tion criterion, a new center " + }, + { + "bbox": [ + 313, + 124, + 503, + 136 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 313, + 124, + 503, + 136 + ], + "type": "text", + "content": " is added only if:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 395, + 145, + 471, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 145, + 471, + 163 + ], + "spans": [ + { + "bbox": [ + 395, + 145, + 471, + 163 + ], + "type": "interline_equation", + "content": "\\min_{s\\in C_{\\text{centers}}}d_{s_{2}s} > d_{c}.", + "image_path": "bf312f87129bcef4dc7f9c5c4aa5297fd4507bf413c59934064a9d43700ba3bf.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "inline_equation", + "content": "d_{s_1 s_2} \\leq d_c" + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "inline_equation", + "content": "s_2" + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "text", + "content": " shouldn't be selected as a cluster center, which leads to a contradiction. Thus, no two centers can be closer than " + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 171, + 553, + 206 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": "Inter-cluster distance upper-bound : Here we will refer to cosine similarity by sim. Let's " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " be two points in the same cluster, and " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " their cluster center. Since each point " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " is within " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " of its cluster center " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": " and the distance used in the DBDPC algorithm is " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": "1 - \\mathrm{sim}" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "inline_equation", + "content": 
"\\mathrm{sim}(\\mathbf{x},\\mathbf{s})\\geq 1 - d_c" + }, + { + "bbox": [ + 313, + 207, + 554, + 278 + ], + "type": "text", + "content": ". We have from [43]:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 338, + 287, + 529, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 338, + 287, + 529, + 300 + ], + "spans": [ + { + "bbox": [ + 338, + 287, + 529, + 300 + ], + "type": "interline_equation", + "content": "\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq \\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) \\cdot \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) + m - 1,", + "image_path": "244bf629c5d74d41ab4b46ad2bbd1f03b64ae38d10496011c5e3fdb93b71607c.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 346, + 308, + 520, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 308, + 520, + 328 + ], + "spans": [ + { + "bbox": [ + 346, + 308, + 520, + 328 + ], + "type": "interline_equation", + "content": "\\text {w h e r e} m = \\min \\left\\{\\operatorname {s i m} (\\mathbf {x}, \\mathbf {s}) ^ {2}, \\operatorname {s i m} (\\mathbf {s}, \\mathbf {y}) ^ {2} \\right\\}.", + "image_path": "892add56fdf618d70b81707cdbf73b8a737ebba71017e7b8dc70e2a3d7ae4917.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 333, + 493, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 333, + 493, + 345 + ], + "spans": [ + { + "bbox": [ + 313, + 333, + 493, + 345 + ], + "type": "text", + "content": "Using " + }, + { + "bbox": [ + 313, + 333, + 493, + 345 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(\\mathbf{x},\\mathbf{s}),\\mathrm{sim}(\\mathbf{s},\\mathbf{y})\\geq 1 - d_c" + }, + { + "bbox": [ + 313, + 333, + 493, + 345 + ], + "type": "text", + "content": " we get" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 353, + 557, + 366 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ 
+ 313, + 353, + 557, + 366 + ], + "spans": [ + { + "bbox": [ + 313, + 353, + 557, + 366 + ], + "type": "interline_equation", + "content": "\\operatorname {s i m} (\\mathbf {x}, \\mathbf {y}) \\geq (1 - d _ {c}) ^ {2} + (1 - d _ {c}) ^ {2} - 1 = 1 - 2 d _ {c} (2 - d _ {c}).", + "image_path": "7b4dfda90299802b9ec3d227d27b3e6f0613a456c69c577ec4c432de34d926a1.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 375, + 554, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 375, + 554, + 399 + ], + "spans": [ + { + "bbox": [ + 313, + 375, + 554, + 399 + ], + "type": "text", + "content": "Finally, converting this back to the distance " + }, + { + "bbox": [ + 313, + 375, + 554, + 399 + ], + "type": "inline_equation", + "content": "d(\\mathbf{x}, \\mathbf{y}) = 1 - \\sin(\\mathbf{x}, \\mathbf{y})" + }, + { + "bbox": [ + 313, + 375, + 554, + 399 + ], + "type": "text", + "content": ", we obtain:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 380, + 407, + 487, + 421 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 407, + 487, + 421 + ], + "spans": [ + { + "bbox": [ + 380, + 407, + 487, + 421 + ], + "type": "interline_equation", + "content": "d (\\mathbf {x}, \\mathbf {y}) \\leq 2 d _ {c} (2 - d _ {c}).", + "image_path": "c71d96b011c757c352e6f8eebc1d66a311b0ef3c469187a7a8d88c82973eb2d9.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 430, + 554, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 430, + 554, + 453 + ], + "spans": [ + { + "bbox": [ + 313, + 430, + 554, + 453 + ], + "type": "text", + "content": "Therefore, the intra-cluster distance in the DBDPC algorithm is bounded by " + }, + { + "bbox": [ + 313, + 430, + 554, + 453 + ], + "type": "inline_equation", + "content": "2d_{c}(2 - d_{c})" + }, + { + "bbox": [ + 313, + 430, + 554, + 453 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 464, + 554, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 554, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 554, + 491 + ], + "type": "text", + "content": "C. A comparison between DBDPC and other clustering algorithms" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "text", + "content": "Comparison between DBDPC and DPC: We note that, aside from using densities, DBDPC is fundamentally different from DPC. Please refer to Appendix A for a detailed explanation of the DPC algorithm. The center identification process in DBDPC results in two main characteristics with formal proof detailed in Appendix B. First, the distance between each element and its cluster center is below " + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "text", + "content": ", which leads to inter-cluster distances being upper-bounded by " + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "inline_equation", + "content": "2d_{c} \\times (2 - d_{c})" + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "text", + "content": ". Additionally, the distance between cluster centers is lower-bounded by " + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 498, + 555, + 689 + ], + "type": "text", + "content": ". These guarantees do not hold for DPC, leading to two drawbacks. Since intercluster distances are not controlled, merging these vectors may result in merging highly dissimilar vectors, leading to information loss. 
Also, in high-density regions, the distance between cluster centers becomes too small, making DPC ineffective in addressing information redundancy." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "content": "A Qualitative comparison Figure 12 presents the clustering results for DBDPC, DPC, DBSCAN, and K-Means on a" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 56, + 98, + 296, + 135 + ], + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 95 + ], + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 95 + ], + "type": "text", + "content": "Algorithm 4 Recursive Center Identification for DBDPC with Iterative Center Identification" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "lines": [ + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "spans": [ + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "text", + "content": "Input: Cutoff distance " + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "d_{c}\\in \\mathbb{R}^{+}" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "text", + "content": " , set of vectors " + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "\\mathbf{U} = \\{\\mathbf{u}_i\\in" + }, + { + "bbox": [ + 56, + 
98, + 296, + 135 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{d_l}\\}_{i = 1}^n" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "text", + "content": " , density values " + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "\\{\\rho_i\\}_{i = 1}^n" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "text", + "content": " , distance matrix " + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "D =" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "[d_{ij}]" + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "text", + "content": " , fallback threshold " + }, + { + "bbox": [ + 56, + 98, + 296, + 135 + ], + "type": "inline_equation", + "content": "T > 0" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 56, + 135, + 216, + 158 + ], + "blocks": [ + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "lines": [ + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "spans": [ + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "type": "text", + "content": "Output: Cluster center indices " + }, + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "type": "inline_equation", + "content": "C_{\\text{centers}}" + }, + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "type": "text", + "content": " Initialize cluster center set " + }, + { + "bbox": [ + 56, + 135, + 216, + 158 + ], + "type": "inline_equation", + "content": "C_{\\text{centers}} =" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 66, + 159, + 187, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 159, + 187, + 171 + ], + "spans": [ + { + "bbox": [ + 66, + 159, + 187, + 171 + ], + "type": "text", + 
"content": "Set the density of each point :" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 124, + 179, + 236, + 195 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 179, + 236, + 195 + ], + "spans": [ + { + "bbox": [ + 124, + 179, + 236, + 195 + ], + "type": "interline_equation", + "content": "\\rho_ {i} = \\mathrm {a r g s o r t} \\big (\\{- \\rho_ {j} \\} _ {j = 1} ^ {n} \\big) [ i ]", + "image_path": "54e72effdf75ab7343329b8a3ac28f916953ad39803d2db1333e86fcd61af826.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 201, + 135, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 201, + 135, + 213 + ], + "spans": [ + { + "bbox": [ + 66, + 201, + 135, + 213 + ], + "type": "text", + "content": "while " + }, + { + "bbox": [ + 66, + 201, + 135, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{U}\\neq \\emptyset" + }, + { + "bbox": [ + 66, + 201, + 135, + 213 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "spans": [ + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "type": "text", + "content": "Compute " + }, + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "type": "text", + "content": " for all vectors " + }, + { + "bbox": [ + 80, + 213, + 223, + 226 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i\\in \\mathbf{U}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 150, + 236, + 209, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 236, + 209, + 255 + ], + "spans": [ + { + "bbox": [ + 150, + 236, + 209, + 255 + ], + "type": "interline_equation", + "content": "\\delta_ {i} = \\min _ {\\rho_ {j} > \\rho_ {i}} d _ {i j}", + "image_path": 
"e643be71e64e169a48e1ff88c1873c8becf45e63affe57e5dc8aded5f62fe043.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 80, + 262, + 184, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 262, + 184, + 274 + ], + "spans": [ + { + "bbox": [ + 80, + 262, + 184, + 274 + ], + "type": "text", + "content": "Select cluster candidates:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 122, + 284, + 238, + 297 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 284, + 238, + 297 + ], + "spans": [ + { + "bbox": [ + 122, + 284, + 238, + 297 + ], + "type": "interline_equation", + "content": "\\mathbf {C} _ {\\text {n e w}} = \\left\\{\\mathbf {u} _ {i} \\in \\mathbf {U} \\mid \\delta_ {i} > d _ {c} \\right\\}", + "image_path": "f8cba66984af80f0f28d03c457da7368c2e0774032fa01236c4883bc43e57b8c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 306, + 185, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 306, + 185, + 318 + ], + "spans": [ + { + "bbox": [ + 80, + 306, + 185, + 318 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\mathbf{C}_{\\mathrm{new}}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 81, + 319, + 189, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 319, + 189, + 331 + ], + "spans": [ + { + "bbox": [ + 81, + 319, + 189, + 331 + ], + "type": "text", + "content": "Update remaining vectors:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 65, + 348, + 293, + 376 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 348, + 293, + 376 + ], + "spans": [ + { + "bbox": [ + 65, + 348, + 293, + 376 + ], + "type": "interline_equation", + "content": "\\mathbf {U} \\leftarrow \\mathbf {U} \\backslash \\left(\\mathbf {C} _ {\\text {n e w}} \\cup \\left\\{\\mathbf {u} _ {k} \\in \\mathbf {U} \\mid \\begin{array}{c} \\exists 
\\mathbf {u} _ {i} \\in \\mathbf {C} _ {\\text {n e w}} \\\\ \\text {s u c h t h a t} d _ {i k} \\leq d _ {c} \\end{array} \\right\\}\\right)", + "image_path": "b85f536cc482df694edec26fea498cd1df299190cf870327598c2a9ca5a099c6.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 81, + 383, + 160, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 383, + 160, + 396 + ], + "spans": [ + { + "bbox": [ + 81, + 383, + 160, + 396 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 81, + 383, + 160, + 396 + ], + "type": "inline_equation", + "content": "|\\mathbf{C}_{\\mathrm{new}}| < T" + }, + { + "bbox": [ + 81, + 383, + 160, + 396 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "spans": [ + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "text", + "content": "Order remaining vectors " + }, + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{U}" + }, + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "text", + "content": " by decreasing " + }, + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 96, + 396, + 279, + 407 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 96, + 408, + 279, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 408, + 279, + 420 + ], + "spans": [ + { + "bbox": [ + 96, + 408, + 279, + 420 + ], + "type": "inline_equation", + "content": "\\mathbf{U}\\gets \\mathrm{Sort}(\\mathbf{U},\\mathrm{key} = \\rho_{i},\\mathrm{order} = \\mathrm{descending})" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 96, + 420, + 238, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 420, + 238, + 430 + ], + "spans": [ + 
{ + "bbox": [ + 96, + 420, + 238, + 430 + ], + "type": "text", + "content": "Call Iterative Center Identification:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "spans": [ + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}} \\gets" + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "text", + "content": " IterativeCenterIdentification(" + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "inline_equation", + "content": "\\mathbf{U}" + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 96, + 431, + 314, + 444 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 96, + 445, + 155, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 445, + 155, + 455 + ], + "spans": [ + { + "bbox": [ + 96, + 445, + 155, + 455 + ], + "type": "text", + "content": "return " + }, + { + "bbox": [ + 96, + 445, + 155, + 455 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 81, + 456, + 108, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 456, + 108, + 466 + ], + "spans": [ + { + "bbox": [ + 81, + 456, + 108, + 466 + ], + "type": "text", + "content": "end if" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 66, + 468, + 109, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 468, + 109, + 478 + ], + "spans": [ + { + 
"bbox": [ + 66, + 468, + 109, + 478 + ], + "type": "text", + "content": "end while" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 66, + 479, + 126, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 479, + 126, + 491 + ], + "spans": [ + { + "bbox": [ + 66, + 479, + 126, + 491 + ], + "type": "text", + "content": "return " + }, + { + "bbox": [ + 66, + 479, + 126, + 491 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 66, + 502, + 241, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 502, + 241, + 512 + ], + "spans": [ + { + "bbox": [ + 66, + 502, + 241, + 512 + ], + "type": "text", + "content": "Function: Iterative Center Identification" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "spans": [ + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "text", + "content": "Inputs: Remaining vectors " + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "inline_equation", + "content": "\\mathbf{U}" + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "text", + "content": " (ordered by " + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "text", + "content": "), current cluster center set " + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "text", + "content": ", cutoff distance " + }, + { + "bbox": [ + 66, + 514, + 295, + 537 + ], + "type": "inline_equation", + "content": "d_{c}" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 66, + 538, + 259, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 538, + 
259, + 550 + ], + "spans": [ + { + "bbox": [ + 66, + 538, + 259, + 550 + ], + "type": "text", + "content": "Outputs: Updated cluster center indices " + }, + { + "bbox": [ + 66, + 538, + 259, + 550 + ], + "type": "inline_equation", + "content": "C_{\\text{centers}}" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 66, + 550, + 140, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 550, + 140, + 560 + ], + "spans": [ + { + "bbox": [ + 66, + 550, + 140, + 560 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 66, + 550, + 140, + 560 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i\\in \\mathbf{U}" + }, + { + "bbox": [ + 66, + 550, + 140, + 560 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 82, + 561, + 201, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 561, + 201, + 574 + ], + "spans": [ + { + "bbox": [ + 82, + 561, + 201, + 574 + ], + "type": "text", + "content": "if " + }, + { + "bbox": [ + 82, + 561, + 201, + 574 + ], + "type": "inline_equation", + "content": "\\min_{\\mathbf{u}_s\\in C_{\\mathrm{centers}}}d_{is} > d_c" + }, + { + "bbox": [ + 82, + 561, + 201, + 574 + ], + "type": "text", + "content": " then" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 96, + 574, + 198, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 574, + 198, + 586 + ], + "spans": [ + { + "bbox": [ + 96, + 574, + 198, + 586 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}} \\gets C_{\\mathrm{centers}} \\cup \\{\\mathbf{u}_i\\}" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 82, + 586, + 108, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 586, + 108, + 595 + ], + "spans": [ + { + "bbox": [ + 82, + 586, + 108, + 595 + ], + "type": "text", + "content": "end if" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 66, + 597, + 99, + 607 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 597, + 99, + 607 + ], + "spans": [ + { + "bbox": [ + 66, + 597, + 99, + 607 + ], + "type": "text", + "content": "end for" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 66, + 609, + 125, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 609, + 125, + 620 + ], + "spans": [ + { + "bbox": [ + 66, + 609, + 125, + 620 + ], + "type": "text", + "content": "return " + }, + { + "bbox": [ + 66, + 609, + 125, + 620 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{centers}}" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 55, + 653, + 296, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 712 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 712 + ], + "type": "text", + "content": "predefined set of two-dimensional points. The figure shows that only DBDPC and DBSCAN identify isolated points as distinct clusters, a crucial feature for visual token reduction, as these points contain unique and thus potentially valuable information. 
We note that, for DBSCAN, these isolated" + } + ] + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 317, + 72, + 438, + 151 + ], + "blocks": [ + { + "bbox": [ + 317, + 72, + 438, + 151 + ], + "lines": [ + { + "bbox": [ + 317, + 72, + 438, + 151 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 438, + 151 + ], + "type": "image", + "image_path": "bc4c9351f57e236b7889f9a925abb16135b22bfdb57da6783bf7f4536a9da928.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 317, + 156, + 438, + 231 + ], + "blocks": [ + { + "bbox": [ + 317, + 156, + 438, + 231 + ], + "lines": [ + { + "bbox": [ + 317, + 156, + 438, + 231 + ], + "spans": [ + { + "bbox": [ + 317, + 156, + 438, + 231 + ], + "type": "image", + "image_path": "922cdcffc7ad49660bc707651ee240d440711902642b8a121d23064d7db11c56.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 242, + 555, + 287 + ], + "lines": [ + { + "bbox": [ + 313, + 242, + 555, + 287 + ], + "spans": [ + { + "bbox": [ + 313, + 242, + 555, + 287 + ], + "type": "text", + "content": "Figure 12. An illustrative example of the difference in clustering characteristics between DBDPC and other clustering algorithms. Two-dimensional points and the Euclidean distance were used for illustration purposes." 
+ } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 438, + 72, + 558, + 151 + ], + "blocks": [ + { + "bbox": [ + 438, + 72, + 558, + 151 + ], + "lines": [ + { + "bbox": [ + 438, + 72, + 558, + 151 + ], + "spans": [ + { + "bbox": [ + 438, + 72, + 558, + 151 + ], + "type": "image", + "image_path": "20af734f0650efb1295f05d68e52bb6af99dca6da5f563347afdb01dacadf413.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 + }, + { + "type": "image", + "bbox": [ + 438, + 156, + 558, + 231 + ], + "blocks": [ + { + "bbox": [ + 438, + 156, + 558, + 231 + ], + "lines": [ + { + "bbox": [ + 438, + 156, + 558, + 231 + ], + "spans": [ + { + "bbox": [ + 438, + 156, + 558, + 231 + ], + "type": "image", + "image_path": "e4261b04fef109488d07af722c5985ca2a3835c943c104f033a8b1cf2613810e.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + } + ], + "index": 35 + }, + { + "bbox": [ + 313, + 305, + 555, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 305, + 555, + 497 + ], + "spans": [ + { + "bbox": [ + 313, + 305, + 555, + 497 + ], + "type": "text", + "content": "points may be identified as noise, depending on the chosen hyperparameters. Moreover, DBDPC partitions both the left and right groups of points into the same number of clusters, maintaining consistency despite the higher density on the left side. In contrast, DPC tends to form a greater number of clusters in high-density regions while creating large clusters in low-density areas, whereas DBSCAN follows the opposite pattern, producing large clusters in high-density regions. In the context of visual token reduction, merging points within these large clusters can result in information loss, leading to performance degradation and making DPC and DBSCAN less suitable than DBDPC for this task. We note that the results presented in Fig. 
12 for DPC and DBSCAN may change when modifying the hyperparameters; however, the characteristics discussed above persist across different hyperparameter choices." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 313, + 508, + 538, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 508, + 538, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 508, + 538, + 521 + ], + "type": "text", + "content": "D. Efficient center identification in DBDPC" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 528, + 440, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 528, + 440, + 541 + ], + "spans": [ + { + "bbox": [ + 313, + 528, + 440, + 541 + ], + "type": "text", + "content": "D.1. A recursive approach" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": "To enhance the efficiency of the DBDPC algorithm, we introduce a recursive center identification method that reduces computational overhead while maintaining clustering accuracy. 
In the DBDPC algorithm, vectors are processed in descending order of their local densities " + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": ", and a vector " + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": " is selected as a cluster center if it is farther than the cutoff distance " + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 545, + 555, + 713 + ], + "type": "text", + "content": " from all previously selected centers. Implementing this as described in the algorithm requires sequentially iterating through all the vectors and checking distances to all previously selected centers, which does not fully leverage GPU parallelization capabilities. In the DBDPC algorithm, when two points have the same density, one is treated as if it has a higher density than the other, depending on the order of their processing. 
To replicate this behavior, we assign the" + } + ] + } + ], + "index": 40 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 41 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 198, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 198, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 198, + 83 + ], + "type": "text", + "content": "density of each point to its rank as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 102, + 93, + 249, + 107 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 93, + 249, + 107 + ], + "spans": [ + { + "bbox": [ + 102, + 93, + 249, + 107 + ], + "type": "interline_equation", + "content": "\\rho_ {i} = \\operatorname {r a n k} _ {i} = \\operatorname {a r g s o r t} \\left(\\left\\{- \\rho_ {j} \\right\\} _ {j = 1} ^ {n}\\right) [ i ]", + "image_path": "ca6b341b6269ae763414e7f14233818f87ac7d1254e4de7a7b732a46874d32c5.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "spans": [ + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": "text", + "content": "Our accelerated method leverages the quantity " + }, + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": "text", + "content": ", representing the minimum distance from vector " + }, + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 55, + 116, + 295, + 152 + ], + "type": 
"text", + "content": " to any higher-density vector:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 145, + 163, + 295, + 182 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 163, + 295, + 182 + ], + "spans": [ + { + "bbox": [ + 145, + 163, + 295, + 182 + ], + "type": "interline_equation", + "content": "\\delta_ {i} = \\min _ {\\rho_ {j} > \\rho_ {i}} d _ {i j} \\tag {12}", + "image_path": "4e49980de2d30f2eec82773b96f3c29fc251423c36f5807c46fec1a9888d0ae2.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\delta_{i} > d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_{i}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " is selected as a cluster center because it is not within " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " of any higher-density vector, which are the only potential cluster centers that can be selected before " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "d_{ij}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " in the DBDPC algorithm. 
In addition, any vector within " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " of a cluster center identified using " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " has a lower density than that center, as cluster centers identified using " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " are not within " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " of any higher-density vector. In the DBDPC algorithm, such a vector would not be chosen as a cluster center because it violates the distance condition relative to already selected centers. By identifying these vectors early, we can exclude them from further consideration as potential centers. We repeat this process recursively: after selecting cluster centers where " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\delta_{i} > d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " and excluding vectors within " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " of these centers, we process the remaining vectors. This recursion continues until the number of newly discovered cluster centers becomes small (e.g., less than 10). 
At that point, we fall back to the DBDPC method, processing the remaining vectors iteratively to ensure all potential centers are considered. This recursive approach reduces the number of iterations in the main loop and enhances parallelization, particularly on GPUs, by minimizing sequential computation. By leveraging " + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 56, + 186, + 296, + 569 + ], + "type": "text", + "content": " and incorporating an early exclusion mechanism, the recursive center identification method reduces computational time while ensuring the same clustering results as the DBDPC algorithm. The recursive approach decreases the number of iterations and enhances GPU parallelization by minimizing sequential computation, making the algorithm more efficient for large datasets. The recursive center identification method is presented in Algorithm 4. We note that in practice this recursive approach reduces the computational time of the DBDPC algorithm by around 3 times." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 576, + 295, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 576, + 295, + 590 + ], + "spans": [ + { + "bbox": [ + 55, + 576, + 295, + 590 + ], + "type": "text", + "content": "D.2. Proof of correctness of the recursive approach" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 594, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 594, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 594, + 295, + 689 + ], + "type": "text", + "content": "To validate the correctness of the accelerated method, we demonstrate the following key points: selected centers are valid cluster centers, excluded vectors are not cluster centers and identifying remaining cluster centers is equivalent to identifying cluster centers on the reduced set. 
Proving these points suffices to establish correctness, as the remaining vectors after the recursive steps are treated the same as in the DBDPC algorithm." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "Selected Centers Are Valid Cluster Centers In the DB-DPC algorithm, for any vector " + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": ", only vectors with higher" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "densities are considered for selection as cluster centers before " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " is not within " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " of any higher-density vector (i.e., " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "\\delta_i > d_c" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": ") then the distance of " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " from any previously selected center cannot exceed the cutoff distance " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": ". Consequently, " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "\\mathbf{u}_i" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " satisfies the condition for being a cluster center in the DBDPC algorithm, as it is farther than " + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "inline_equation", + "content": "d_c" + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": " from all centers processed earlier." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "spans": [ + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "content": "Excluded Vectors Are Not Cluster Centers Vectors within " + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "content": " of a cluster center identified using " + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "content": " have lower densities than that center, as these centers are not within " + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "content": " to any higher density point. In the DBDPC algorithm, such vectors would not be selected as cluster centers because they are within " + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 156, + 553, + 251 + ], + "type": "text", + "content": " to an already selected center, violating the distance condition. Therefore, excluding these vectors early does not affect the selection of valid cluster centers." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": "Identifying Remaining Cluster Centers is Equivalent to Identifying Cluster Centers on the Reduced Set After selecting cluster centers where " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "\\delta_{i} > d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " and excluding vectors within " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " of these centers, we focus on the reduced set of remaining vectors for further processing. The critical observation is that the previously selected cluster centers are not within " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " of any vector in the reduced set. This is ensured by the exclusion step, where all vectors within " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " of these centers have been removed. Consequently, when identifying new cluster centers within the reduced set, we do not need to consider distances to the previously selected centers, as they cannot influence the selection due to their distance. Moreover, the vectors that have been excluded are not potential cluster centers themselves. Meaning that they can not influence the center selection process. 
This means that any vector satisfying " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "\\delta > d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " in the reduced set, is actually not within " + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 251, + 553, + 468 + ], + "type": "text", + "content": " to any higher density potential cluster center form the initial set, making it a cluster center." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 475, + 553, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 475, + 553, + 503 + ], + "spans": [ + { + "bbox": [ + 313, + 475, + 553, + 503 + ], + "type": "text", + "content": "E. On the choice of Positional IDs for clustering algorithms" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 555, + 715 + ], + "type": "text", + "content": "In our work, we benchmark four clustering algorithms: agglomerative clustering [1], k-means [2], Density Peaks Clustering (DPC) [5], and DBSCAN [14]. For each algorithm, we use the key vectors for clustering, apply a cosine similarity-based distance (as in DBDPC), and evaluate two strategies: merging the hidden states within each cluster or selecting the cluster center as a representative token. We report the best-performing approach for each algorithm. Similar to DBDPC, we assign the position ID of the cluster center to the resulting vectors. However, apart from DPC, the other clustering algorithms do not explicitly provide a cluster center. For k-means and agglomerative clustering, we select the cluster center as the point closest to the average of all points in the cluster, using keys and cosine similarity. 
For DBSCAN, we experimented with choosing the point connected to the most other points within the cluster and found this approach to yield slightly better results, aligning" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 97 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 97 + ], + "type": "text", + "content": "better with the principles of DBSCAN. Thus, we adopted this strategy in our tests." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 105, + 295, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 105, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 55, + 105, + 295, + 133 + ], + "type": "text", + "content": "F. More about applying ToME to Visual Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "spans": [ + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": "ToMe reduces the number of visual tokens at each layer of the transformer. For a given layer " + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": ", the process starts by splitting the tokens into two distinct sets, A and B. 
Each token in set A is matched with its most similar counterpart in set B, using cosine similarity based on key vectors to determine the closest pairs. The top " + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": " pairs with the highest similarity are then selected for merging. Connected components from the matched pairs are combined into single vectors, where hidden states are averaged. It is important to note that each connected component contains exactly one element from set B, and when applying ToME to Visual Language Models, this element's position ID is assigned to the merged token. In [6], the number of visual tokens was reduced by a fixed quantity " + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "inline_equation", + "content": "(r_i = r)" + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": ". However, this fixed reduction scheme cannot achieve more than a " + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": " reduction unless no reduction is done at later layers when the number of tokens drops below " + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 55, + 140, + 295, + 427 + ], + "type": "text", + "content": ", which goes against the gradual reduction strategy proposed in ToMe. To enable higher reduction ratios, we adopt a linearly decreasing scheduler, where the reduction is higher in early layers and decreases in later layers. This approach achieves a smaller average number of visual tokens across the network while still reducing the token count at each layer, allowing us to reach high reduction ratios effectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 439, + 295, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 439, + 295, + 467 + ], + "spans": [ + { + "bbox": [ + 55, + 439, + 295, + 467 + ], + "type": "text", + "content": "G. Implementation details and hyperparameters for PACT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": "For all experiments on LLaVA-OneVision-7B, we set " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{n} = 2" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\alpha = 1.5" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ". While the optimal values of each parameter may vary depending on the dataset, we aim to evaluate the real-world effectiveness of our approach by using consistent values across all testing datasets. The results in Tab. 2 were obtained using " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{c} = 0.21" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\lambda = 0.55" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " while those in Tab. 
1 were obtained using " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{c} = 0.17" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ". Additionally, to demonstrate the performance of our approach at different reduction ratios, we vary " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and report the results. The values of the fixed parameters " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{n}" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " were chosen by performing a grid search on SeedBench [24], which is why we do not include Seed-Bench in the testing datasets. 
It is important to note that finding the optimal parameters for all testing datasets is not the focus of this study, as this would require extensive testing of different values for " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{n}" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " on all test sets. Such an approach would not accurately reflect the real-world performance of our method. Instead, we chose to only vary " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 55, + 474, + 295, + 713 + ], + "type": "text", + "content": " to evaluate the effectiveness of our approach at different reduction ratios. 
When" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": "testing on SeedBench, we found that a pruning ratio higher than " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " harms performance. Therefore, we vary the pruning ratio between " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " and test across different values of " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "d_{c}" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": ". When testing PACT on LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. We use the same values of " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "d_{n}" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " as when testing on LLaVA-OneVision-7B. We note that these hyperparameters may not be optimal; however, as we aim to test the generalizability of our approach, we opt to use the same hyperparameters across models. 
Figure 13, Figure 14 and Figure 15 show the maximum distance between the keys at several layers of the language model for LLaVA-1.6-Mistral-7B, Qwen2-VL-7B-Instruct and InternVL2-8B. Following the same approach for LLaVA-OneVision-7B, we choose " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "L = 4" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " for Qwen2-VL-7B-Instruct and " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "L = 7" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " for InternVL2-8B. We note that the choice of the reduction layer for InternVL2-8B is not as evident as for LLaVA-OneVision-7B and Qwen2-VL-7B-Instruct, as the increase in maximum distance from one layer to the next is sometimes minimal, making it unclear which layer offers the best balance between accuracy and computational efficiency. However, since we do not aim to experimentally determine the optimal reduction layer, we end up choosing " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "L = 7" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": ", as the maximum distance between keys is increased by an acceptable amount between the seventh and eighth layer. Following the same approach we use " + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "inline_equation", + "content": "L = 7" + }, + { + "bbox": [ + 313, + 72, + 555, + 384 + ], + "type": "text", + "content": " for LLaVA-1.6-Mistral-7B." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 394, + 548, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 394, + 548, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 394, + 548, + 407 + ], + "type": "text", + "content": "H. 
More about test datasets and used metrics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 415, + 555, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 415, + 555, + 628 + ], + "spans": [ + { + "bbox": [ + 313, + 415, + 555, + 628 + ], + "type": "text", + "content": "For evaluating the different approaches, we use LMMs-Eval [26] and aim to follow the same dataset splits and metrics as used in [27]. We detail the used splits and metrics in Tab. 4. Some datasets require evaluation using a GPT model through the OPENAI API or other closed-source models. However, for many datasets the version of the closed-source model used in evaluating LLaVA-OneVision in [27] is no longer available. So we use the latest version of GPT-4 for our assessments at the time of publication (gpt-4o-2024-08-06). We also observed that when calling a closed-source model like GPT-4 via an API, the responses are not fully deterministic, even with a temperature set to zero, introducing some noise into the evaluation metrics. To reduce this noise, we exclude all these datasets when testing across different reduction ratios. On the other hand, for Tab. 1, we exclude MMVet, Vibe-Eval, VideoChatGPT, MM-LiveBench, and LLaVA-Wilder as they have high inference times, which would dominate the throughput calculation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 715 + ], + "type": "text", + "content": "For certain datasets, such as DocVQA, InfoVQA, and TextVQA, we use the validation split contrary to [27]. This choice allows us to test various reduction ratios and approaches without requiring submission to the test server, which would be impractical for extensive testing. 
For datasets requiring a test set submission (EgoSchema and PerceptionTest), where either the validation set is typically" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 71, + 286, + 214 + ], + "blocks": [ + { + "bbox": [ + 66, + 71, + 286, + 214 + ], + "lines": [ + { + "bbox": [ + 66, + 71, + 286, + 214 + ], + "spans": [ + { + "bbox": [ + 66, + 71, + 286, + 214 + ], + "type": "image", + "image_path": "2018b92a3ceb38fe58df20c32ffb33e26839e109a7d40b3c17c99039061b7f48.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 225, + 294, + 258 + ], + "lines": [ + { + "bbox": [ + 56, + 225, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 56, + 225, + 294, + 258 + ], + "type": "text", + "content": "Figure 13. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of LLaVA-1.6-Mistral-7B before the application of rotary embeddings." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 276, + 294, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 276, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 276, + 294, + 430 + ], + "type": "text", + "content": "not used for evaluation or does not exist, we report the submission-based metrics evaluated directly on the test set. 
As explained above, for some datasets our evaluation setup differs from the one used for evaluating LLaVA-OneVision in [27], which may result in variations in the reported results for this model on certain datasets. This is primarily due to the use of validation splits for DocVQA, InfoVQA, and TextVQA, as well as the reliance on GPT-based metrics for some datasets (a common practice for these benchmarks, making alternative evaluation difficult). Nevertheless, our comparisons remain fair, as the same evaluation procedure is consistently applied across all approaches and reduction ratios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 432, + 294, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 432, + 294, + 491 + ], + "spans": [ + { + "bbox": [ + 57, + 432, + 294, + 491 + ], + "type": "text", + "content": "We note that when using reduction methods, results may include slight variations due to edge cases where distances or importance metrics for different vectors are equal. That's why we report results based on the average of three different runs for each dataset." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 491, + 294, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 491, + 294, + 621 + ], + "spans": [ + { + "bbox": [ + 57, + 491, + 294, + 621 + ], + "type": "text", + "content": "Notably, when testing on Qwen2-VL-7B-Instruct without reduction, some datasets encountered GPU out-of-memory errors (MLVU, VideoMME, and ActivityNet Perception) which we excluded from the test set. Additionally, results on ScienceQA were quite low when tested without reduction (0.132), leading to its exclusion from testing as well. We note that, as we use LMM-Eval [26] for evaluation, results differ for some datasets from the officially reported results, as prompts are sometimes not formatted in the same manner. This observation also applies to InternVL2-8B." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 634, + 214, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 634, + 214, + 646 + ], + "spans": [ + { + "bbox": [ + 57, + 634, + 214, + 646 + ], + "type": "text", + "content": "I. Additional numerical results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 57, + 654, + 294, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 654, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 654, + 294, + 712 + ], + "type": "text", + "content": "Table 8 and Tab. 9 show a comparison of DBDPC and various clustering algorithms for a reduction ratio of approximately " + }, + { + "bbox": [ + 57, + 654, + 294, + 712 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 57, + 654, + 294, + 712 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B across multiple datasets. The results demonstrate that DBDPC outperforms other clustering algorithms in visual token reduction for the" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 325, + 72, + 542, + 233 + ], + "blocks": [ + { + "bbox": [ + 325, + 72, + 542, + 233 + ], + "lines": [ + { + "bbox": [ + 325, + 72, + 542, + 233 + ], + "spans": [ + { + "bbox": [ + 325, + 72, + 542, + 233 + ], + "type": "image", + "image_path": "1cf0b451a2b9e5ba443debf65c14f58ca11284800c693f17f3908ef610c04e2b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 246, + 553, + 278 + ], + "lines": [ + { + "bbox": [ + 315, + 246, + 553, + 278 + ], + "spans": [ + { + "bbox": [ + 315, + 246, + 553, + 278 + ], + "type": "text", + "content": "Figure 14. Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of Qwen2-VL-7B-Instruct before the application of rotary embeddings." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 317, + 553, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 317, + 553, + 472 + ], + "spans": [ + { + "bbox": [ + 316, + 317, + 553, + 472 + ], + "type": "text", + "content": "majority of the datasets. Additionally, the tables show that the clustering process for DBDPC is significantly faster than that of other clustering algorithms. Table 10 presents a comparison of EUTI-based visual token pruning and FastV for a reduction ratio of approximately " + }, + { + "bbox": [ + 316, + 317, + 553, + 472 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 316, + 317, + 553, + 472 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B across various datasets. The results indicate that EUTI outperforms FastV on most datasets while also being more computationally efficient. Table 15 shows that using keys for distance calculations in DBDPC outperforms hidden states across the majority of the test datasets. Also, we present a comparison between PACT and other visual reduction techniques for InternVL2-8B, and LLaVA-1.6-Mistral-7B across different datasets in Tab. 6, and Tab. 7." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 326, + 512, + 542, + 667 + ], + "blocks": [ + { + "bbox": [ + 326, + 512, + 542, + 667 + ], + "lines": [ + { + "bbox": [ + 326, + 512, + 542, + 667 + ], + "spans": [ + { + "bbox": [ + 326, + 512, + 542, + 667 + ], + "type": "image", + "image_path": "4f0258050dc5d3c9c34a559923d1aba15b7d6193026a4795cf763cec369c5ee9.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 680, + 553, + 712 + ], + "lines": [ + { + "bbox": [ + 315, + 680, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 315, + 680, + 553, + 712 + ], + "type": "text", + "content": "Figure 15. 
Illustration of the maximum distance between the keys of visual tokens for the first 10 layers of InternVL2-8B before the application of rotary embeddings." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 58, + 71, + 294, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 71, + 294, + 96 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 294, + 96 + ], + "type": "text", + "content": "J. Ablation study : Additional numerical results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 58, + 105, + 294, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 105, + 294, + 236 + ], + "spans": [ + { + "bbox": [ + 58, + 105, + 294, + 236 + ], + "type": "text", + "content": "Table 11 shows a comparison between PACT, DBDPC, and EUTI for a reduction ratio of approximately " + }, + { + "bbox": [ + 58, + 105, + 294, + 236 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 58, + 105, + 294, + 236 + ], + "type": "text", + "content": ", applied on LLaVA-OneVision-7B. The results demonstrate that PACT, which combines both clustering and pruning, outperforms the other two methods that are either clustering-based or pruning-based across various datasets. More importantly, DBDPC and EUTI exhibit a significant drop in performance on some of the datasets, which is not the case for PACT. We note that numerical results for the ablation studies conducted on DBDPC, EUTI, and PACT can be found in Tab. 12, Tab. 13 and Tab. 14." 
+ } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 734, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 734, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 734, + 308, + 741 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 272, + 470, + 586 + ], + "blocks": [ + { + "bbox": [ + 55, + 197, + 555, + 263 + ], + "lines": [ + { + "bbox": [ + 55, + 197, + 555, + 263 + ], + "spans": [ + { + "bbox": [ + 55, + 197, + 555, + 263 + ], + "type": "text", + "content": "Table 4. Dataset Splits, Subsets, and Evaluation Metrics Used in Our Experiments. Default indicates the use of the standard test split or cases where only one split/subset is available. The evaluation metrics employed are those commonly used for the respective datasets and generally the ones proposed in the official papers. For GPT-based scores (or any model-based scores), this means that a GPT model was used during evaluation, typically to extract answers from the generated output text, which are then matched with the ground truth to calculate accuracy using exact matches. When accuracy is reported, it generally implies that only an exact match is considered a correct answer." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 272, + 470, + 586 + ], + "lines": [ + { + "bbox": [ + 138, + 272, + 470, + 586 + ], + "spans": [ + { + "bbox": [ + 138, + 272, + 470, + 586 + ], + "type": "table", + "html": "
DatasetSplitSubsetEvaluation Metric
VideoMMEDefaultNo subtitlesAccuracy
MMEDefaultDefaultMME Perception Score
DocVQAValidationDefaultANLS
MLVUDefaultDefaultAccuracy
LLaVA-InterleaveDefaultOut-domainAccuracy
ChartQAValidationDefaultRelaxed Accuracy
MMBenchValidationEnglishGPT-based Score
MuirBenchDefaultDefaultAccuracy
ScienceQADefaultVision onlyAccuracy
MMMUValidationDefaultAccuracy
AI2DDefaultDefaultAccuracy
InfographicVQAValidationDefaultANLS
MMStarDefaultDefaultAccuracy
ActivityNetQADefaultDefaultGPT-based Score
MM-LiveBenchDefault2406GPT-based Score
LLaVA-WilderDefaultSmallGPT-based Score
MathVerseDefaultVision miniGPT-based Score
MathVistaDefaultTestminiGPT-based Score
MMVetDefaultDefaultGPT-based Score
Vibe-EvalDefaultDefaultREKA-based Score
VideoChatGPTDefaultDefaultGPT-based Score
EgoSchemaDefaultDefaultSubmission
PerceptionTestDefaultMultiple Choice QASubmission
TextVQAValidationDefaultOfficial metric
", + "image_path": "4fddf2bbe561b78a54a7f7ffc2b2961bf3a801236cc2b45eb20735bc0616777f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 157, + 95, + 455, + 367 + ], + "blocks": [ + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "lines": [ + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "spans": [ + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "type": "text", + "content": "Table 5. Performance of PACT on LLaVA-OneVision-7B using " + }, + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "type": "inline_equation", + "content": "d_{c} = 0.17" + }, + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "type": "inline_equation", + "content": "\\alpha = 0.7" + }, + { + "bbox": [ + 139, + 75, + 471, + 86 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 157, + 95, + 455, + 367 + ], + "lines": [ + { + "bbox": [ + 157, + 95, + 455, + 367 + ], + "spans": [ + { + "bbox": [ + 157, + 95, + 455, + 367 + ], + "type": "table", + "html": "
DatasetPACT (Ours)
MetricRed. RatioProc. TimeAlgo. Time
VideoMME57.769.2%0.3210.021
MME1571.072.1%0.2260.017
DocVQA85.471.1%0.4670.026
MLVU64.869.2%0.3220.022
LLaVA-Interleave62.272.2%0.1330.010
ChartQA77.371.4%0.3090.019
MMBench79.972.0%0.1340.010
MuirBench42.470.9%0.1750.013
ScienceQA93.572.0%0.1300.010
MMMU48.872.6%0.1030.007
AI2D81.272.5%0.1730.013
InfographicVQA61.570.0%0.4030.023
MMStar59.572.3%0.1470.011
ActivityNetQA55.170.0%0.4090.029
MathVerse17.176.0%0.3500.021
MathVista62.173.0%0.2600.015
EgoSchema60.069.1%0.3200.021
PerceptionTest52.370.0%0.3010.023
TextVQA75.569.2%0.3200.023
", + "image_path": "b373501466d7e91d7e686c23bd0ab34b12c97e4ccb856350d0e6b824e1c308d9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 58, + 399, + 555, + 562 + ], + "blocks": [ + { + "bbox": [ + 100, + 380, + 509, + 391 + ], + "lines": [ + { + "bbox": [ + 100, + 380, + 509, + 391 + ], + "spans": [ + { + "bbox": [ + 100, + 380, + 509, + 391 + ], + "type": "text", + "content": "Table 6. Comparison of PACT with FastV, VTW, and ToME applied on InternVL2-8B on Various Datasets." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 399, + 555, + 562 + ], + "lines": [ + { + "bbox": [ + 58, + 399, + 555, + 562 + ], + "spans": [ + { + "bbox": [ + 58, + 399, + 555, + 562 + ], + "type": "table", + "html": "
DatasetNo ReductionPACT (Ours)FastVVTWToME
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
VideoMME52.20.24751.168.4%0.15151.10.15551.00.14250.20.190
MME1621.00.1711591.969.9%0.1211588.70.1181627.00.1111533.30.155
MLVU50.60.43949.768.8%0.32648.80.32549.50.33329.30.343
LLaVA-Interleave40.00.39039.071.2%0.26539.70.26339.60.23036.70.316
MMBench81.90.16180.470.4%0.11880.20.11680.20.10970.80.165
MuirBench35.70.43234.470.3%0.24935.60.25833.70.21032.70.296
ScienceQA97.10.16597.170.8%0.11895.80.11695.70.10989.90.151
MMMU48.50.16748.070.6%0.12647.70.12647.80.11947.50.156
AI2D82.50.14681.470.7%0.11278.50.11079.60.10574.40.142
MMStar59.00.17956.770.4%0.18654.20.18453.40.35255.10.156
PerceptionTest57.70.30056.866.0%0.20356.20.21334.10.19255.20.228
EgoSchema54.00.24053.767.0%0.15553.10.16332.20.14652.90.172
ActivityNet51.70.24051.366.0%0.15351.00.16130.80.14350.40.171
MM-LiveBench68.03.07567.368.0%2.14067.02.24740.42.00366.62.354
", + "image_path": "47d6de9bf39a12c1ede01a2d5400e2a30ae9211ca9514afd9801eed89d29348e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 58, + 592, + 555, + 708 + ], + "blocks": [ + { + "bbox": [ + 69, + 572, + 541, + 583 + ], + "lines": [ + { + "bbox": [ + 69, + 572, + 541, + 583 + ], + "spans": [ + { + "bbox": [ + 69, + 572, + 541, + 583 + ], + "type": "text", + "content": "Table 7. Comparison of PACT with FastV, Prumerge, and Hired applied on LLaVA-1.6-Mistral-7B across multiple datasets." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 592, + 555, + 708 + ], + "lines": [ + { + "bbox": [ + 58, + 592, + 555, + 708 + ], + "spans": [ + { + "bbox": [ + 58, + 592, + 555, + 708 + ], + "type": "table", + "html": "
DatasetNo ReductionPACT (Ours)FastVPrumergeHired
MetricProc. TimeMetricRed. RatioProc. TimeMetricProc. TimeMetricProc. TimeMetricProc. Time
MME1500.00.2371507.170.3%0.1591503.90.1581485.40.1661497.00.168
DocVQA70.00.36367.167.1%0.28464.50.28148.80.29365.80.295
ChartQA52.90.33249.370.1%0.25948.90.26136.00.26446.10.266
MMBench68.20.22668.071.9%0.15567.90.15466.20.16067.60.164
ScienceQA73.00.19772.771.5%0.14473.20.14571.70.14872.90.149
MMMU34.20.23934.971.5%0.17134.70.16933.90.18033.90.180
AI2D67.50.23367.570.9%0.16067.00.15864.50.16565.90.166
InfographicVQA36.90.29435.666.2%0.22633.40.22931.90.23631.60.236
MMStar36.20.37536.771.9%0.35036.60.40035.10.34535.90.345
", + "image_path": "ed6b232143a7b8525b16cacf76dc3a10e834e1960654e6bfa3d7bf0f470aac2c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 112, + 555, + 250 + ], + "blocks": [ + { + "bbox": [ + 55, + 81, + 555, + 103 + ], + "lines": [ + { + "bbox": [ + 55, + 81, + 555, + 103 + ], + "spans": [ + { + "bbox": [ + 55, + 81, + 555, + 103 + ], + "type": "text", + "content": "Table 8. Comparison of DBDPC and Agglomerative Clustering Methods for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 81, + 555, + 103 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 55, + 81, + 555, + 103 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 112, + 555, + 250 + ], + "lines": [ + { + "bbox": [ + 57, + 112, + 555, + 250 + ], + "spans": [ + { + "bbox": [ + 57, + 112, + 555, + 250 + ], + "type": "table", + "html": "
DatasetDBDPC (ours)Agg. (Single Linkage)Agg. (Average Linkage)Agg. (Complete Linkage)
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.61.5041.14857.01.6571.31657.91.6901.350
MME1563.80.2550.0281554.10.9940.7381559.21.1230.8681563.01.1510.897
DocVQA84.70.5300.04483.61.8991.37984.42.1851.66284.32.3081.777
MLVU64.20.3840.03964.01.5741.22965.21.6751.32964.81.7001.355
LLaVA-Interleave62.10.1510.01662.00.4250.27761.50.4460.29861.40.4460.298
ChartQA76.00.3660.03174.51.1510.79875.81.2530.91075.81.2770.930
MMBench80.10.1510.01679.50.4270.27779.70.4370.29179.80.4490.299
MuirBench43.20.2150.02341.40.6670.47442.00.7270.53442.00.7380.544
ScienceQA94.70.1470.01594.80.3940.25094.70.4160.27194.70.4130.269
MMMU48.30.1100.00948.40.2180.11049.30.2320.12148.20.2250.117
AI2D80.70.2020.02280.80.6670.47280.60.7480.55180.10.7530.557
InfographicVQA61.60.5280.04657.11.6081.18159.81.8181.39459.81.8701.436
MMStar60.50.1670.01860.20.5070.34459.80.5560.39060.50.5600.395
", + "image_path": "7c034be835ae6416aad9e6ae87aa0a942f2b6dec0befc8b2ea5c6961e0d1085d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 58, + 308, + 555, + 445 + ], + "blocks": [ + { + "bbox": [ + 55, + 277, + 553, + 300 + ], + "lines": [ + { + "bbox": [ + 55, + 277, + 553, + 300 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 553, + 300 + ], + "type": "text", + "content": "Table 9. Comparison of DBDPC, DBSCAN, DPC, and KMeans Clustering Methods for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 277, + 553, + 300 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 55, + 277, + 553, + 300 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 308, + 555, + 445 + ], + "lines": [ + { + "bbox": [ + 58, + 308, + 555, + 445 + ], + "spans": [ + { + "bbox": [ + 58, + 308, + 555, + 445 + ], + "type": "table", + "html": "
DatasetDBDPC (ours)DBSCANDPCKMeans
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.40.3890.04057.40.3940.04656.90.7290.39257.31.7251.383
MME1563.80.2550.0281560.30.2740.0361549.90.6370.3801549.91.2540.999
DocVQA84.70.5300.04484.20.5330.04483.00.9500.44279.62.0591.544
MLVU64.20.3840.03964.20.3910.04864.20.7270.38264.61.7251.377
LLaVA-Interleave62.10.1510.01660.40.1590.02663.90.2580.12162.30.7110.566
ChartQA76.00.3660.03175.20.3690.03475.20.7580.41574.21.3991.059
MMBench80.10.1510.01678.10.1530.02079.50.3260.17979.90.7020.552
MuirBench43.20.2150.02342.40.2190.02842.00.4660.27342.90.9550.763
ScienceQA94.70.1470.01591.20.1500.02494.30.2510.11793.40.6610.518
MMMU48.30.1100.00947.80.1300.03048.30.1870.07848.20.5000.391
AI2D80.70.2020.02279.20.2020.02280.30.4550.26481.11.0620.860
InfographicVQA61.60.5280.04654.00.5310.05256.60.9750.54757.81.7801.357
MMStar60.50.1670.01856.60.1790.02860.60.3760.21360.20.8280.661
", + "image_path": "2da5772f2eeadcbe64d4a7c934552b43391ca5494d5f1c72760427bf69ecdd85.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 104, + 504, + 504, + 701 + ], + "blocks": [ + { + "bbox": [ + 55, + 472, + 555, + 495 + ], + "lines": [ + { + "bbox": [ + 55, + 472, + 555, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 555, + 495 + ], + "type": "text", + "content": "Table 10. Comparison of EUTI-based visual tokens pruning and FastV for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 472, + 555, + 495 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 55, + 472, + 555, + 495 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 104, + 504, + 504, + 701 + ], + "lines": [ + { + "bbox": [ + 104, + 504, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 504, + 701 + ], + "type": "table", + "html": "
DatasetEUTI (Ours)FastV
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME58.40.3510.00557.60.3810.040
MME1560.00.2560.0041570.70.2830.025
DocVQA86.50.5210.00585.30.5590.032
MLVU64.30.3550.00463.10.3910.040
LLaVA-Interleave58.90.1400.00359.70.1520.007
ChartQA78.60.3440.00478.00.3630.016
MMBench80.20.1420.00379.20.1510.005
MuirBench40.00.1910.00340.80.2040.009
ScienceQA93.60.1370.00392.30.1490.006
MMMU48.80.1010.00247.30.1100.003
AI2D81.10.1910.00380.30.2020.009
InfographicVQA63.00.4250.00560.30.4730.040
MMStar59.60.1590.00359.60.1700.007
", + "image_path": "53c029aac0c8feed6528662a19f0a98c00dc5cad77900b4645d2e4b3090fe95c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 149, + 555, + 327 + ], + "blocks": [ + { + "bbox": [ + 55, + 118, + 555, + 142 + ], + "lines": [ + { + "bbox": [ + 55, + 118, + 555, + 142 + ], + "spans": [ + { + "bbox": [ + 55, + 118, + 555, + 142 + ], + "type": "text", + "content": "Table 11. Comparison of PACT with Standalone Methods: EUTI-based Visual Token Pruning and DBDPC Clustering Algorithm for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 118, + 555, + 142 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 55, + 118, + 555, + 142 + ], + "type": "text", + "content": ", applied on LLaVA-OneVision-7B." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 149, + 555, + 327 + ], + "lines": [ + { + "bbox": [ + 58, + 149, + 555, + 327 + ], + "spans": [ + { + "bbox": [ + 58, + 149, + 555, + 327 + ], + "type": "table", + "html": "
DatasetPACTDBDPCEUTI
MetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. TimeMetricProc. TimeAlgo. Time
VideoMME57.50.3210.02157.30.3420.04058.40.3050.005
MME1558.70.2260.0171543.70.2430.0281595.90.2130.004
DocVQA84.30.4670.02682.50.5000.04485.30.4560.005
MLVU64.60.3220.02263.90.3580.03964.40.2910.004
LLaVA-Interleave63.90.1330.01062.60.1490.01657.10.1270.003
ChartQA77.20.3110.01975.10.3330.03178.20.2920.004
MMBench80.20.1340.01079.70.1470.01679.60.1280.003
MuirBench42.80.1750.01343.20.2110.02339.90.1640.003
ScienceQA93.60.1300.01093.80.1420.01592.20.1230.003
MMMU48.90.1030.00747.20.1090.00948.90.0960.002
AI2D80.60.1730.01380.50.1910.02279.90.1640.003
InfographicVQA61.90.4030.02358.80.4650.04660.40.3600.005
MMStar59.50.1470.01159.50.1630.01859.20.1400.003
", + "image_path": "c869c2029d490794dbb3bde4a68093b4736da9447da98d5fdbfcfc98533dc48b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 61, + 482, + 545, + 663 + ], + "blocks": [ + { + "bbox": [ + 55, + 429, + 555, + 473 + ], + "lines": [ + { + "bbox": [ + 55, + 429, + 555, + 473 + ], + "spans": [ + { + "bbox": [ + 55, + 429, + 555, + 473 + ], + "type": "text", + "content": "Table 12. Ablation Studies on DBDPC-based visual token reduction for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 429, + 555, + 473 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 55, + 429, + 555, + 473 + ], + "type": "text", + "content": " on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches. When ablating the Center Position-IDs assignment, we reorder the hidden states based on the mean of the Position-IDs of the elements in each cluster and then assign position IDs sequentially." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 482, + 545, + 663 + ], + "lines": [ + { + "bbox": [ + 61, + 482, + 545, + 663 + ], + "spans": [ + { + "bbox": [ + 61, + 482, + 545, + 663 + ], + "type": "table", + "html": "
DBDPCw/o Center Position-IDs assignmentw/o Proportional Attentionw/o Merging
VideoMME57.458.057.957.5
MME1563.81539.31523.81476.9
DocVQA84.728.284.283.1
MLVU64.261.263.963.5
LLaVA-Interleave62.169.663.263.6
ChartQA76.024.876.074.4
MMBench80.176.180.179.6
MuirBench43.226.543.244.0
ScienceQA94.767.494.293.6
MMMU48.334.547.648.2
AI2D80.743.080.479.9
InfographicVQA61.617.859.858.7
MMStar60.558.959.659.1
", + "image_path": "1e4b5a29c5c158bc8faf221aaeebaba29da94b0f519896f6bd7fbc3df2a52aee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 127, + 100, + 480, + 281 + ], + "blocks": [ + { + "bbox": [ + 55, + 69, + 555, + 94 + ], + "lines": [ + { + "bbox": [ + 55, + 69, + 555, + 94 + ], + "spans": [ + { + "bbox": [ + 55, + 69, + 555, + 94 + ], + "type": "text", + "content": "Table 13. Ablation Studies on the EUTI-based Visual Token Pruning for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 69, + 555, + 94 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 55, + 69, + 555, + 94 + ], + "type": "text", + "content": ", applied on LLaVA-OneVision-7B. We report only the metrics, as processing time is similar across different approaches." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 127, + 100, + 480, + 281 + ], + "lines": [ + { + "bbox": [ + 127, + 100, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 127, + 100, + 480, + 281 + ], + "type": "table", + "html": "
DatasetEUTIEUTI w/o NormNorm (EUTI w/o Global Query)
VideoMME58.457.656.6
MME1595.91573.41576.5
DocVQA85.385.179.7
MLVU64.363.063.1
LLaVA-Interleave57.157.952.9
ChartQA78.276.476.7
MMBench79.679.479.4
MuirBench40.040.539.6
ScienceQA92.291.893.5
MMMU48.949.349.2
AI2D79.979.979.7
InfographicVQA60.460.149.3
MMStar59.257.459.2
", + "image_path": "7c35b30d6fa0b6f7bac48158d8e6bf94a4fbad368daa8af814b0768af4faa148.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 162, + 316, + 444, + 497 + ], + "blocks": [ + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "lines": [ + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "spans": [ + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "type": "text", + "content": "Table 14. Ablation Study on Pruned Tokens Recovery for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "type": "text", + "content": ". We remove the token recovery step, which is equivalent to Setting " + }, + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 285, + 555, + 309 + ], + "type": "text", + "content": " to Zero. We report only the metrics, as processing time is similar across both approaches." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 162, + 316, + 444, + 497 + ], + "lines": [ + { + "bbox": [ + 162, + 316, + 444, + 497 + ], + "spans": [ + { + "bbox": [ + 162, + 316, + 444, + 497 + ], + "type": "table", + "html": "
DatasetPACTPACT w/o Pruned-Token Recovery
VideoMME57.657.4
MME1556.71576.3
DocVQA84.384.3
MLVU64.664.2
LLaVA-Interleave63.959.6
ChartQA76.476.4
MMBench79.979.8
MuirBench42.842.2
ScienceQA93.393.6
MMMU48.548.5
AI2D80.680.6
InfographicVQA61.961.3
MMStar75.174.9
", + "image_path": "15271f48c5f424341279adde269dbd210f51db8fd8ea993e5b4552d4548344f8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 196, + 532, + 410, + 713 + ], + "blocks": [ + { + "bbox": [ + 55, + 500, + 555, + 525 + ], + "lines": [ + { + "bbox": [ + 55, + 500, + 555, + 525 + ], + "spans": [ + { + "bbox": [ + 55, + 500, + 555, + 525 + ], + "type": "text", + "content": "Table 15. Ablation Study on Keys Utilization in DBDPC for a Reduction Ratio of approximately " + }, + { + "bbox": [ + 55, + 500, + 555, + 525 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 55, + 500, + 555, + 525 + ], + "type": "text", + "content": ". Metrics are reported, as processing time is similar across both configurations." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 196, + 532, + 410, + 713 + ], + "lines": [ + { + "bbox": [ + 196, + 532, + 410, + 713 + ], + "spans": [ + { + "bbox": [ + 196, + 532, + 410, + 713 + ], + "type": "table", + "html": "
DatasetDBDPCDBDPC w/o Keys
VideoMME57.4057.22
MME1563.801526.18
DocVQA84.7080.50
MLVU64.2064.60
LLaVA-Interleave62.1060.80
ChartQA76.0068.80
MMBench80.1079.21
MuirBench43.2041.40
ScienceQA94.7091.90
MMMU48.3047.90
AI2D80.7079.10
InfographicVQA61.656.70
MMStar60.5058.40
", + "image_path": "7d8540bfc1f13e20032a59f4e2f73397b14276eca2d36cc26cb8541cdde345ac.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_content_list.json b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..389a16cc7b3dc51ac8181115f236d437c70b489a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_content_list.json @@ -0,0 +1,5734 @@ +[ + { + "type": "text", + "text": "A Survey of Frontiers in LLM Reasoning: Inference Scaling, Learning to Reason, and Agentic Systems", + "text_level": 1, + "bbox": [ + 112, + 98, + 880, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zixuan Ke\\*", + "bbox": [ + 112, + 181, + 210, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fangkai Jiao", + "bbox": [ + 114, + 203, + 236, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yifei Ming*", + "bbox": [ + 114, + 224, + 210, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xuan-Phi Nguyen*", + "bbox": [ + 114, + 247, + 271, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Austin Xu\\*", + "bbox": [ + 114, + 268, + 209, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Do Xuan Long†,‡", + "bbox": [ + 114, + 287, + 254, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"Minzhi Li† ‡", + "bbox": [ + 114, + 310, + 210, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chengwei Qin", + "bbox": [ + 114, + 332, + 238, + 347 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Peifeng Wang*", + "bbox": [ + 114, + 354, + 236, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Silvio Savarese*", + "bbox": [ + 114, + 375, + 245, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Caiming Xiong*", + "bbox": [ + 114, + 396, + 246, + 411 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shafiq Joty\\*,", + "bbox": [ + 114, + 417, + 227, + 433 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "\\*Salesforce AI Research", + "Nanyang Technological University" + ], + "bbox": [ + 112, + 445, + 348, + 473 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "† National University of Singapore", + "$^\\ddagger I^2 R$ , $A^{*}STAR$ , Singapore" + ], + "bbox": [ + 377, + 445, + 604, + 473 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zixuan ke@salesforce.com", + "bbox": [ + 715, + 184, + 883, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "jiaofangkai@hotmail.com", + "bbox": [ + 715, + 205, + 883, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yifei.ming@salesforce.com", + "bbox": [ + 710, + 227, + 882, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xnguyen@salesforce.com", + "bbox": [ + 723, + 250, + 882, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "austin.xu@salesforce.com", + "bbox": [ + 717, + 270, + 882, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xuanlong.do@u.nus.edu", + "bbox": [ + 728, + 291, + 882, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "li.minzhi@u.nus.edu", + "bbox": [ + 750, + 311, + 882, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"chengwei003@e.ntu.edu.sg", + "bbox": [ + 710, + 334, + 882, + 347 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "peifeng.wang@salesforce.com", + "bbox": [ + 692, + 354, + 882, + 369 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ssavarese@salesforce.com", + "bbox": [ + 715, + 377, + 882, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "cxiong@salesforce.com", + "bbox": [ + 735, + 398, + 882, + 411 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sjoty@salesforce.com", + "bbox": [ + 745, + 419, + 882, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 535, + 539, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reasoning is a fundamental cognitive process that enables logical inference, problem-solving, and decision-making. With the rapid advancement of large language models (LLMs), reasoning has emerged as a key capability that distinguishes advanced AI systems from conventional models that empower chatbots. In this survey, we categorize existing methods along two orthogonal dimensions: (1) Regimes, which define the stage at which reasoning is achieved (either at inference time or through dedicated training); and (2) Architectures, which determine the components involved in the reasoning process, distinguishing between standalone LLMs and agentic compound systems that incorporate external tools, and multiagent collaborations. Within each dimension, we analyze two key perspectives: (1) Input level, which focuses on techniques that construct high-quality prompts that the LLM condition on; and (2) Output level, which methods that refine multiple sampled candidates to enhance reasoning quality. 
This categorization provides a systematic understanding of the evolving landscape of LLM reasoning, highlighting emerging trends such as the shift from inference-scaling to learning-to-reason (e.g., DeepSeek-R1), and the transition to agentic workflows (e.g., OpenAI Deep Research, Manus Agent). Additionally, we cover a broad spectrum of learning algorithms, from supervised fine-tuning to reinforcement learning such as PPO and GRPO, and the training of reasoners and verifiers. We also examine key designs of agentic workflows, from established patterns like generator-evaluator and LLM debate to recent innovations. Finally, we identify emerging trends, such as domain-specific reasoning systems, and open challenges, such as evaluation and data quality. This survey aims to provide AI researchers and practitioners with a comprehensive foundation for advancing reasoning in LLMs, paving the way for more sophisticated and reliable AI systems.", + "bbox": [ + 169, + 577, + 826, + 910 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09037v3 [cs.AI] 5 Aug 2025", + "bbox": [ + 22, + 284, + 60, + 710 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg", + "image_caption": [ + "Figure 1: The LLM reasoning surge. We show the cumulative number (in thousands) of papers published from 2022 to 2/2025, based on Semantic Scholar keyword search. Research on reasoning regimes and agent architectures has accelerated notably since the introduction of Chain-of-Thought (CoT) in 2022. This growth is further influenced by other major developments, such as the release of ChatGPT (Ouyang et al., 2022) in 9/2022, and popularity of in-context learning (Brown et al., 2020) as an inference-time optimization method." 
+ ], + "image_footnote": [], + "bbox": [ + 196, + 103, + 514, + 327 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 103, + 795, + 327 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 449, + 261, + 465 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reasoning is the cognitive process of analyzing evidence, constructing arguments, and applying logic to form conclusions or make informed judgments. It is essential to many intellectual pursuits, including decision-making, problem-solving, and critical thinking. The study of reasoning spans multiple disciplines—philosophy (Passmore, 1961), psychology (Wason & JohnsonLaird, 1972), and computer science (Huth & Ryan, 2004)—as it provides insights into how individuals interpret information, evaluate alternatives, and develop sound conclusions using logic.", + "bbox": [ + 109, + 486, + 883, + 578 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, large language models (LLMs) have demonstrated a range of emerging abilities, such as in-context learning (Dong et al., 2024), role playing (Shanahan et al., 2023b) and domain adaptation (Ke et al., 2023; 2025a; Ke & Liu, 2023) as they scale, with reasoning becoming one of the most critical capabilities. As shown in Figure 1, this area has rapidly gained research attention, often referred to as LLM reasoning or reasoning language model (RLM) (Besta et al., 2025). 
The increasing focus on this topic is understandable, as reasoning capability is: (i) Challenging, requiring multi-step processing beyond the token-by-token generative nature of auto-regressive LLMs; (ii) Fundamental, as it is a core aspect of intelligence, particularly in planning and strategic decision-making; and, most importantly, (iii) Promising, as recent advances in LLMs hint at a viable path forward. Given these factors, reasoning is widely regarded as a prerequisite for more advanced AI systems approaching Artificial General Intelligence (AGI), beyond the conventional AI that aims to closely follow instruction (Duenas & Ruiz, 2024).", + "bbox": [ + 109, + 584, + 883, + 750 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reasoning requires LLMs to go beyond directly producing an answer from a question; instead, they must generate the thinking process (implicitly or explicitly) in the form of 'question $\\rightarrow$ reasoning steps $\\rightarrow$ answer'. It has been shown that scaling pre-training may not be the optimal solution for improving reasoning (Snell et al., 2025; OpenAI, 2025). Instead, one popular approach to achieve this is the well-known chain-of-thought (CoT) prompting (Wei et al., 2022b), which demonstrates that by modifying the prompt (e.g., 'Let us think step by step') or in-context samples, LLMs can elicit a step-by-step reasoning process at test time without additional training. Such intuitive prompting techniques have been shown to substantially improve LLMs' reasoning accuracy (Wei et al., 2022b). Building on this, the ability of LLMs to reason effectively depends on two factors: how and at what stage reasoning is achieved, and what components are involved in the reasoning process. Accordingly, in this survey, we categorize existing research into two orthogonal dimensions: (1) Regime, refers to whether reasoning is achieved through inference-time strategies (aka. 
inference-time", + "bbox": [ + 109, + 758, + 883, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "scaling) or through direct learning and adaptation (learning to reason); and (2) Architecture, refers to whether reasoning happens within a single, standalone LLM or within an interactive, agentic system.", + "bbox": [ + 109, + 103, + 883, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These two dimensions are orthogonal, meaning different regimes can be applied to the same architecture, and different architectures can operate under the same regime. The intersection of these dimensions allows for a more comprehensive and systematic organization of reasoning techniques, encompassing most approaches studied to date while highlighting key trends, such as the shift from inference scaling to learning-to-reason and from standalone LLMs to agentic systems. Notably, most prior surveys have focused on only one or two of these dimensions, typically inference scaling and standalone LLMs, rarely considering both together (see detailed comparison later). By introducing this categorization, we aim to provide a structured perspective that clarifies the diverse landscape of LLM reasoning and establishes a foundation for future research.", + "bbox": [ + 109, + 141, + 883, + 263 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "1.1 Reasoning Regimes", + "text_level": 1, + "bbox": [ + 112, + 279, + 305, + 296 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inference scaling CoT prompting demonstrates the potential to scale inference-time (test-time) reasoning. It has also been shown that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it improves generalization through enhanced flexibility in prompt and workflow design. 
Building on this, inference scaling techniques have emerged, allowing additional test-time computation before generating an answer. The key idea is that instead of updating the LLM itself, these methods aim to select the best trajectories to improve reasoning.", + "bbox": [ + 109, + 306, + 883, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Several variants of prompting methods (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022) have been introduced, providing structured prompts to enhance reasoning. Additionally, inference scaling optimizes reasoning through search and planning (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023; Suzgun & Kalai, 2024a). One key challenge in search and planning is evaluating the quality of candidate solutions. However, evaluating reasoning quality is inherently difficult, even for humans. Existing approaches can be categorized based on whether they judge the final outcome, i.e., outcome reward models (ORMs) (Hendrycks et al., 2021b), or the reasoning process, i.e., process reward models (PRMs) (Lightman et al., 2024).", + "bbox": [ + 109, + 405, + 883, + 512 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "One of the most notable milestones in this direction is OpenAI's o1 (09/2024) (OpenAI et al., 2024), which demonstrate the effectiveness of inference-time scaling in complex tasks like mathematics, coding and scientific problem-solving:", + "bbox": [ + 109, + 518, + 883, + 564 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "\"We have found that the performance of o1 consistently improves with more reinforcement learning (train-time compute) and with more time spent thinking (test-time compute). 
The constraints on scaling this approach differ substantially from those of LLM pretraining, and we are continuing to investigate them.\" — OpenAI o1 release blog", + "bbox": [ + 169, + 583, + 826, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Learning-to-reason Another approach to unleash the deliberate thinking is updating the LLM through training. Unlike inference scaling, learning-to-reason aims to enhance reasoning capabilities through dedicated training, reducing reliance on costly inference-time computations. However, a key challenge in this regime is the scarcity of training data, as step-by-step human-annotated reasoning trajectories are prohibitively expensive to collect. To address this, research has focused on automatically generating such trajectories and developing effective training strategies to leverage them. For example, supervised fine-tuning with long CoT (Muennighoff et al., 2025) or preference learning with reasoning preference data, with DPO (Rafailov et al., 2023) as a representative approach. More recent approaches even bypass reasoning annotation by using reinforcement learning (RL), with recent work like GRPO (Shao et al., 2024) demonstrating remarkable success in this direction. A significant milestone in this direction is DeepSeek-R1 (01/2025) (DeepSeek-AI et al., 2025), an open-source model that achieves performance comparable to OpenAI's o1 while requiring far fewer computational resources. It further reveals that RL alone is possible to learn the sophisticated behaviors just as the test-time computation increase:", + "bbox": [ + 109, + 662, + 883, + 861 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "\"One of the most remarkable aspects of this self-evolution is the emergence of sophisticated behaviors as the test-time computation increases. 
Behaviors such as reflection—where the model revisits and reevaluates its previous steps—and the exploration of alternative ap-", + "bbox": [ + 169, + 878, + 826, + 926 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg", + "image_caption": [ + "Figure 2: The proposed categorization over regimes, architectures, and unified perspectives in this survey." + ], + "image_footnote": [], + "bbox": [ + 114, + 99, + 883, + 280 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "proaches to problem-solving arise spontaneously. These behaviors are not explicitly programmed but instead emerge as a result of the model's interaction with the reinforcement learning environment.\" — DeepSeek-R1 'Aha moment'", + "bbox": [ + 169, + 343, + 826, + 388 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1.2 Reasoning System Architecture", + "text_level": 1, + "bbox": [ + 112, + 417, + 393, + 431 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Standalone LLM and agentic systems Orthogonal to the regimes, studies have explored architectural advancements in LLM reasoning, moving beyond next-token prediction in standalone models to embrace agentic systems—AI systems that exhibit interactivity and autonomy to refine reasoning and decision-making. These systems go beyond the challenges of inference scaling or learning to reason; they introduce system-level complexities, such as designing workflows and coordinating potentially conflicting actions.", + "bbox": [ + 109, + 448, + 883, + 525 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Single-Agent and multi-agent systems To distinguish agentic systems from standalone LLMs, we adopt the perspective of Kapoor et al. (2024), framing agentic behavior as a spectrum. 
We categorize these systems into two families: single-agent and multi-agent. In single-agent systems, a single LLM interacts with tools in its environment to refine reasoning, actions, and perceptions. These tools include external knowledge bases (Ke et al., 2024; Hammane et al., 2024; Sun et al., 2023), verifiers (Wan et al., 2024c; Guan et al., 2025), and practical applications like code interpreters, calendars, and maps (Yu et al., 2023b; Lu et al., 2024a). By leveraging these resources, the LLM iteratively enhances its decision-making and problem-solving capabilities. Recent milestones in single-agent systems, such as Grok 3 Deep Search (02/2025) and OpenAI Deep Research (02/2025), demonstrate how agents interact with the web to significantly improve reasoning, perform tasks like information retrieval, use code interpreters for calculations, and aggregate data from multiple sources.", + "bbox": [ + 109, + 547, + 883, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "\"Deep research independently discovers, reasons about, and consolidates insights from across the web. To accomplish this, it was trained on real-world tasks requiring browser and Python tool use ... While o1 demonstrates impressive capabilities in coding, math, and other technical domains, many real-world challenges demand extensive context and information gathering from diverse online sources.\" — OpenAI deep research release blog", + "bbox": [ + 169, + 744, + 826, + 819 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The second family, multi-agent systems, goes beyond agent-environment interactions by enabling agent-agent communication. Each agent takes on a distinct role and exchanges messages with others. Key challenges include designing effective communication protocols—whether collaborative (Chen et al., 2023c) or adversarial (Liang et al., 2023b)—and coordinating actions to reach consensus on the final action for the environment. 
A recent example of this potential is Manus, a popular product showcasing the power of multi-agent systems.", + "bbox": [ + 109, + 849, + 883, + 925 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1.3 Unified Perspectives", + "text_level": 1, + "bbox": [ + 112, + 103, + 308, + 118 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Although inference scaling and learning-to-reason take different approaches to improving reasoning, they are inherently connected. Inference scaling focuses on selecting the best reasoning trajectories, while learning-to-reason leverages both good and bad trajectories as training data. To unify these approaches, we categorize reasoning trajectory collection techniques in both regimes based on two key perspectives: input and output. At the input level, techniques modify or augment prompts to guide the LLM toward desirable reasoning paths. At the output level, the LLM generates multiple candidate responses, which are then evaluated, ranked, or refined. This framework highlights that many inference scaling techniques—such as prompt modification or trajectory search—can be repurposed for trajectory collection in learning-to-reason (as described in Section 3 and Section 5). Moreover, this connection shows that the two approaches are complementary: inference scaling methods can be applied to models trained under learning-to-reason, motivating the development of inference-aware learning-to-reason methods (Section 5.4).", + "bbox": [ + 109, + 130, + 883, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "These aspects are also effective across different architectures. Similar to standalone LLMs, we categorize techniques based on input and output perspectives. 
However, to align with agentic system conventions, we use perception as input (to an agent) and action as output (of an agent) in single-agent systems. For multi-agent systems, we consider communication as input (to a participating agent) and coordination as output (of the system). This analogy provides a unified perspective across regimes and architectures, offering a systematic and generalizable framework for analyzing LLM reasoning (see Figure 2).", + "bbox": [ + 109, + 304, + 883, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1.4 Goal and Structure of the Survey", + "text_level": 1, + "bbox": [ + 112, + 412, + 408, + 429 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The goal of this survey is to provide a comprehensive overview of key algorithmic details and major milestones in LLM reasoning research, particularly since the emergence of Chain-of-Thought (CoT), across both regime and architecture dimensions. We believe this is a timely and valuable contribution to the community, given the clear acceleration in research following CoT's introduction in 2022 (Figure 1). The rapid growth in studies exploring all aspects of LLM reasoning—from regimes and architectures to training algorithms—highlights the increasing importance and utility of reasoning capabilities in advancing the field.", + "bbox": [ + 109, + 440, + 883, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 2 provides an overview of the categorization in this survey, organized along two orthogonal dimensions. Within each architecture, there are two key perspectives to consider. The first perspective is input, or perception, or communication. This concerns how to construct a better prompt, refine the given observations from the environment, or establish protocols for exchanging messages with other agents. 
The second is output—encompassing action or coordination—which involves aggregating outputs, enhancing actions, or coordinating actions to produce a final result. While the figure illustrates high-level categorizations, the following sections delve into more specific terms. For example, 'input' is discussed in terms of constructing prompts (see e.g., Sections 3.1.1 and 5.1.1), while 'output' relates to optimizing output and collecting high-quality trajectories (e.g., Sections 3.1.2 and 5.1.2).", + "bbox": [ + 109, + 539, + 880, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 3 outlines the structure of this survey. We start with a brief introduction to the background, covering key terminologies, components, regimes, and architectures (Section 2). The subsequent sections explore inference scaling (Section 3), learning algorithms for reasoners and verifiers (Section 4), and learning to reason (Section 5). Within the discussions on inference scaling and learning to reason, we examine three key architectures: Standalone LLMs, Single-Agent systems, and Multi-Agent systems. Finally, Section 6 summarizes key insights and discusses open challenges and future directions.", + "bbox": [ + 109, + 681, + 883, + 773 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "1.5 Comparison to Related Surveys", + "text_level": 1, + "bbox": [ + 112, + 791, + 393, + 806 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reasoning in LLMs has long been a fundamental challenge in the field. Earlier works, such as Huang & Chang (2023), provide a comprehensive overview of the evolution of informal deductive reasoning covering developments prior to the emergence of LLM agents and Reasoning Language Models (RLMs). Our work extends this discussion by focusing on LLM agents and RLMs. Qiao et al. (2023b) offer a detailed summary of advancements in LLM reasoning, with a particular emphasis on prompting techniques. 
In contrast, we offer a broader range of regimes (from inference to training) and architectures (from standalone LLM to multi-agent systems). Readers interested in a formal definition and taxonomy of natural language reasoning—grounded", + "bbox": [ + 109, + 818, + 880, + 925 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg", + "image_caption": [ + "Figure 3: Taxonomy of LLM reasoning research organized in this survey by regimes (inference scaling, learning to reason) and architectures (standalone LLM, single-agent, multi-agent). Each leaf node includes examples from the literature that focus on the corresponding category." + ], + "image_footnote": [], + "bbox": [ + 112, + 99, + 903, + 743 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "in philosophical foundations—may refer to Yu et al. (2024a), which focuses specifically on this direction and is complementary to our scope.", + "bbox": [ + 111, + 825, + 883, + 858 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Improvements in LLM reasoning are closely tied to advancements in a variety of techniques. Dong et al. (2024) present a comprehensive survey on in-context learning (ICL), while Zhou et al. (2024c) explore the interpretation and analysis of ICL from both theoretical and empirical perspectives. In contrast, our work organizes ICL techniques under different regimes—standalone LLMs, single-agent, and multi-agent", + "bbox": [ + 111, + 864, + 883, + 925 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "systems—highlighting how these techniques evolve and interact within each setting. Recent studies suggest that enhancements in reasoning are often linked to inference scaling. 
Dong et al. (2024) provide an extensive review of inference-time self-improvement, and Welleck et al. (2024) offer a survey focused on three key themes: token-level generation algorithms, meta-generation algorithms, and efficient generation. Following the release of Reasoning Language Models (RLMs) such as OpenAI's o1 and DeepSeek's R1, there has been a significant increase in research dedicated to learning-to-reason approaches. Zeng et al. (2024) and Xu et al. (2025d) provide thorough surveys on these emerging developments. However, these surveys primarily focus on LLMs, and do not address agentic or multi-agent reasoning settings in depth.", + "bbox": [ + 109, + 103, + 883, + 224 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Research on LLM reasoning has predominantly centered on logical and mathematical reasoning. Liu et al. (2025a) offer a comprehensive survey of logical reasoning in LLMs, delving into its theoretical foundations and associated benchmarks. In their position paper, Yang et al. (2024d) underscore the pivotal role of formal mathematical reasoning, showcasing its superiority over traditional NLP-based methods in generating verifiable proofs and automated feedback. Their work outlines progress in theorem proving and auto-formalization while identifying key challenges that remain. While we cover domain-specific reasoning in Section 6.1.3, we refer readers to Liu et al. (2025a) and Yang et al. (2024d) for a more in-depth treatment of these topics.", + "bbox": [ + 109, + 231, + 883, + 339 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Reasoning is a critical capability in agentic systems (Pezeshkpour et al., 2024; Masterman et al., 2024). While numerous reviews focus on agent systems (Xi et al., 2023; Kapoor et al., 2024), discussions on reasoning within these systems remain limited. A concurrent work by Besta et al. 
(2025) introduces a comprehensive and modular framework for RLMs that systematically organizes key components such as reasoning structures, strategies, benchmarks and learning algorithms. However, their work does not delve into agentic and multiagent LLM systems.1", + "bbox": [ + 109, + 344, + 883, + 435 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This survey provides a comprehensive overview of major milestones in LLM reasoning research, emphasizing two key dimensions: (1) the evolution of learning schemes—from inference scaling to learning-to-reason approaches—and (2) architectural advancements—from single LLMs to multi-agent systems. These dimensions summarize recent progress and lay the groundwork for future reasoning LLMs and agentic systems. We unify techniques under input and output perspectives, clarifying what must be customized or designed when building reasoning systems. Additionally, we detail essential techniques, including a comparison of the latest learning algorithms (e.g., RL) and an in-depth discussion of refiners and verifiers, which are critical for facilitating reasoning. Given these contributions, our survey is timely, offering AI researchers up-to-date insights into the field. 
We anticipate further research along these dimensions, such as agent-human regimes (Liang et al., 2024) and automated workflow design architectures (Hu et al., 2025; Zhang et al., 2024c; Zhou et al., 2025a).", + "bbox": [ + 109, + 443, + 880, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2 Background", + "text_level": 1, + "bbox": [ + 112, + 628, + 256, + 646 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we introduce foundational concepts that will be utilized throughout the paper.", + "bbox": [ + 109, + 661, + 789, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2.1 Problem Formulation", + "text_level": 1, + "bbox": [ + 112, + 694, + 316, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "LLM reasoning is often formulated within the Markov Decision Process (MDP) framework (Bellman, 1958), treating reasoning as a sequential decision-making process. While many of the terminologies in LLM reasoning originate from the AI agent and reinforcement learning (RL) literature (Russell & Norvig, 2010), their meaning in LLM reasoning can sometimes differ to suit the nature of LLM-based reasoning.", + "bbox": [ + 109, + 720, + 880, + 782 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Reasoning step and thought The definition of what makes a reasoning step can vary depending on the specific inference or learning algorithm used, and it often depends on the granularity at which rewards (or feedback) are considered. Generally, a reasoning step can be expressed as a sequence of tokens $a_{t} = (x_{t_{1}},\\ldots ,x_{t_{K}})$ , where $x_{t_k}$ is the $k$ -th token at inference step $t$ . Typically, $a_{t}$ represents a coherent step in reasoning (Lightman et al., 2024), such as a logical deduction or an intermediate conclusion. 
However, in extreme cases, a reasoning step can be the entire response (Zhang et al., 2024b; DeepSeek-AI et al., 2025)", + "bbox": [ + 109, + 797, + 883, + 888 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "To avoid redundancy with existing literature, we do not include an analysis of reasoning benchmarks in this survey. For a detailed discussion of benchmarks, we direct readers to Xu et al. (2025d); Besta et al. (2025).", + "bbox": [ + 111, + 898, + 883, + 925 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/ccf703a6c16491ff1de1e5cf6ff471fa0b06b107d09e10325da106c385ddafd4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
SymbolName/terminologyExplanation
atAction/responseThe reasoning step or action taken at time step t , where t ∈ {1,2,...,T}
stState/contextst := (q, a1, ..., at-1), where q is the prompt/question.
RReward model/verifierEvaluates the reasoning quality of action at state st, providing feedback.
rtRewardrt := R(st, at), reward given by verifier at time step t.
τTrajectoryτ := ((s0, a0, r0), ..., (sT, aT, rT)), The entire reasoning process leading to an answer.
πPolicy model/reasonerat ~ π(at|st): The reasoning strategy that maps a reasoning state to the next reasoning step.
VValue ModelEstimates the expected future reasoning quality from state st.
FRefinera′t = F(st, at, rt): Modifies or refines the action based on feedback from the verifier.
", + "bbox": [ + 117, + 101, + 879, + 223 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 1: An overview of symbols and terminologies for convenience.", + "bbox": [ + 253, + 232, + 743, + 248 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "or a single token (Schulman et al., 2017; Ouyang et al., 2022).2 The term Thought generally refers to the sequence of reasoning steps (i.e., reasoning trajectory) that occur from the question (excluding the question itself) to the final answer (excluding the final answer).", + "bbox": [ + 109, + 273, + 883, + 321 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reasoning as MDP An MDP is a general framework for modeling environments where an agent makes sequential decisions by observing states and receiving rewards for its actions. The state-action-reward trajectories in an MDP can be formally expressed as: $\\tau = ((s_0, a_0, r_0), \\ldots, (s_T, a_T, r_T))$ , where $T$ is the trajectory length. Naturally, LLM reasoning can be framed as an MDP, as each reasoning step builds upon previous ones to arrive at a final answer $(s_T)$ from a question $(s_0)$ . However, a key distinction lies in how the state transition function $P(s_{t+1} | s_t, a_t)$ is defined. In traditional MDPs, state transitions are driven by the environment (unknown to the agent). 
In LLM reasoning, this depends on the system architecture: in standalone LLMs, the model itself generates the next state, whereas in agentic systems, state transitions can be influenced by external tools within the environment.", + "bbox": [ + 109, + 335, + 883, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In RL-based approaches, the goal is to maximize the reasoning quality measured by the cumulative reward:", + "bbox": [ + 109, + 479, + 883, + 494 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\max \\mathbb {E} _ {\\tau \\sim P (\\tau | s _ {0}, \\pi)} \\left[ \\sum_ {t = 1} ^ {T} r _ {t} \\right], \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 513, + 883, + 555 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\pi$ is the reasoning policy and $r_t = \\mathcal{R}(s_t, a_t)$ is the reward given by the reward function $\\mathcal{R}$ at time step $t$ . There are two primary approaches to optimize Equation 1. The first is via training, which involves optimizing model parameters to learn the optimal policy $\\pi$ through methods like preference learning (e.g., DPO (Rafailov et al., 2023)) or reinforcement learning (e.g., PPO (Schulman et al., 2017)). The second is inference-scaling, which optimizes Equation 1 without altering model parameters. Instead, it employs a form of \"search\" with a frozen model, often guided by a reward model (Zhang et al., 2025b). 
We summarize key terminologies in Table 1.", + "bbox": [ + 109, + 561, + 883, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2.2 Key Components of LLM Reasoning Systems", + "text_level": 1, + "bbox": [ + 112, + 683, + 500, + 700 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "An LLM-based reasoning system may contain three key components depending on the reasoning regime and system architecture: (a) A Reasoner that generates the reasoning steps, serving as the policy model; (b) Verifiers that evaluate the correctness of the final outcome and/or reasoning steps, serving as reward functions; and (c) A Refiner that improves reasoning trajectories by refining responses based on the feedback from the verifier. Figure 4 shows a depiction of these components. While these components play complementary and important roles in a reasoning system, they can be implemented by the same LLM, e.g., self-refinement (Saunders et al., 2022; Madaan et al., 2024) unifies them.", + "bbox": [ + 109, + 710, + 883, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reasoner The reasoner generates reasoning steps based on the current state of the reasoning process. It takes as input the previous states and outputs the next response or action. As the core component of a reasoning system, it determines how reasoning progresses and influences the final outcome.", + "bbox": [ + 109, + 832, + 883, + 878 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "2Although RLHF (Reinforcement Learning from Human Feedback) methods (Ouyang et al., 2022) receive rewards based on the final answer (outcome level), the underlying RL algorithms operate as multi-step RL at the token level. 
This differs from approaches like DeepSeek-R1 (DeepSeek-AI et al., 2025), which employs one-step RL for training.", + "bbox": [ + 109, + 886, + 883, + 925 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg", + "image_caption": [ + "Figure 4: Three key components of a reasoning system. The Reasoner proposes new responses (usually accompanied with rationales) for a query. The Verifier takes as input a verification instruction (e.g., what aspects to evaluate) and the response(s) from the reasoner, then outputs a judgment on the response(s) (often in the form of a numeric score or relative order, and typically accompanied by a natural language critique or rationale for its judgment). The Refiner, unlike the first two, takes as input an incorrect response and optionally the critique (as provided by the verifier) and outputs a revised response." + ], + "image_footnote": [], + "bbox": [ + 305, + 101, + 692, + 349 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "**Verifier** The verifier assesses the quality of the final answer or intermediate reasoning steps and provides feedback to the reasoner. Verifiers can be outcome-level, where only the outcome is evaluated, or process-level, where intermediate reasoning steps are also evaluated. The type of feedback can range from a scalar reward (e.g., correct/wrong answer on a math problem or pass/fail for code test case) to natural language explanations. 
When ground-truth is available (e.g., during training), the verifier can be implemented using rule-based functions (e.g., string matching) or by training a reward model or using an LLM-judge model.", + "bbox": [ + 109, + 476, + 883, + 569 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Refiner Given a feedback from the verifier, as well as a response from the reasoner, a refiner tries to improve and polish the original reasoning trajectory containing flaws. Refiners can play two important roles in reasoning. First, it can serve as a general approach to improve the performance during inference. More importantly, by providing explicit analysis, a refiner can also conduct implicit search, i.e., pointing out the obstacles in current trajectory, and offer a new perspective to compress the search space. Yet, recent studies (Qu et al., 2024a) show that is not at least easier than learning reasoning.", + "bbox": [ + 109, + 582, + 883, + 674 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "2.3 System Architectures", + "text_level": 1, + "bbox": [ + 112, + 689, + 318, + 705 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Building on the three key components introduced above, in this section, we describe how these elements are organized within different system architectures to achieve effective reasoning. While the three components serve as the foundation, their integration and interaction vary across architectural paradigms. In this survey, we structure reasoning systems into three main types: standalone LLM, single-agent system, and multi-agent system. 
Figure 5 shows their comparison with visualizations.", + "bbox": [ + 109, + 717, + 883, + 794 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "2.3.1 Standalone LLM Systems", + "text_level": 1, + "bbox": [ + 112, + 808, + 364, + 824 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "A standalone LLM system comprises a single LLM which can play the role of one or more components (we refer this as unified components) in the reasoning system. It processes an input prompt and generates final outputs, which often include rationales or reasoning steps. As an LLM, it has the capability to produce diverse rationales through sampling—a key property utilized by many advanced reasoning techniques. Importantly, a standalone LLM operates independently, without interacting with external environments or collaborating with other LLMs. Its decision-making is based solely on simple input-output mappings or through iterative", + "bbox": [ + 109, + 833, + 883, + 925 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg", + "image_caption": [ + "Figure 5: Three architecture types used for designing a reasoning system in the context of LLMs. highlights perspectives that the literature emphasizes for customization." + ], + "image_footnote": [], + "bbox": [ + 114, + 99, + 883, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "sampling from the same model, where the prompt incorporates prior reasoning steps (a method known as self-contained reasoning). 
This self-contained nature allows the LLM to function autonomously while maintaining coherence in its reasoning processes.", + "bbox": [ + 109, + 511, + 883, + 556 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "2.3.2 From Standalone LLM to Language Agents", + "text_level": 1, + "bbox": [ + 112, + 574, + 498, + 590 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "While the concept of an agent has been a long-standing idea in AI (Russell & Norvig, 2010), the notion of language agents has gained prominence alongside recent advancements in LLMs. The key distinction between an agent and a standalone LLM lies in two advanced capabilities: interactivity (Weng, 2023; Yao & Narasimhan, 2023) and autonomy (Xi et al., 2023; Wang et al., 2024d). Interactivity refers to an agent's ability to engage with the external world, including environments or other agents. This capability is crucial because LLMs, while powerful, often have limited knowledge and reasoning abilities confined to their internal memory. By enabling interaction with the external world, an LLM can augment its internal knowledge with external information, significantly expanding its understanding and grounding its outputs in real-world observations. Autonomy, on the other hand, refers to an agent's ability not only to follow human instructions but also to independently initiate and execute actions. This capability often involves planning but can extend to more complex behaviors. For instance, a fully autonomous agent should be capable of detecting novel situations, proactively taking initiative, and determining effective interaction strategies without explicit human guidance. 
These advanced capabilities distinguish LLM-based agents from standalone LLMs, enabling them to operate more dynamically and adaptively in real-world scenarios.", + "bbox": [ + 109, + 601, + 883, + 815 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To delineate the boundary between the agent and its environment, we employ the concept of controllability (Sumers et al., 2024). Specifically, the environment is defined as an external module that the agent cannot modify. For example, a knowledge base containing resources like Wikipedia or a compiler is considered part of the environment because the agent cannot alter it. Similarly, another LLM acting as a judge or verifier is also treated as part of the environment, as its outputs operate independently of the agent. In contrast,", + "bbox": [ + 109, + 820, + 883, + 897 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "3In this survey, the terms agent and LLM-based agent are used interchangeably unless stated otherwise.", + "bbox": [ + 130, + 910, + 767, + 924 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "components like working memory or prompts that the agent can directly modify are not classified as part of the environment.", + "bbox": [ + 109, + 103, + 883, + 133 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In this work, we adopt the perspective of Kapoor et al. (2024), which conceptualizes agentiness as a spectrum. The more interactiveness and autonomy an LLM exhibits, the more agentic it is considered to be. In the upper right of Figure 5, we illustrate this spectrum visually. 
Within this spectrum, we define a system with agent-environment interaction as a single-agent system and a system that additionally incorporates agent-agent communication as a multi-agent system.", + "bbox": [ + 109, + 141, + 883, + 218 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "2.3.3 Single-agent Systems", + "text_level": 1, + "bbox": [ + 112, + 233, + 333, + 250 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Given the definitions above, the interaction between the agent and its environment is a central aspect of single-agent systems. These interactions can vary widely in complexity and design. In Figure 5, we illustrate a single-agent system in the bottom left. The focus here is on designing the agent's actions—such as tool use, retrieval, or answer refinement—and obtaining useful perceptions from the environment, which may include feedback from an external verifier or compiler, or data from a knowledge base (KB). This architecture enhances the LLM's capabilities by enabling it to dynamically engage with and adapt to external contexts.", + "bbox": [ + 109, + 258, + 883, + 351 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "While a fully autonomous agent should ideally learn to interact with the environment automatically, the literature identifies several predefined interaction patterns (also referred to as workflows (Schluntz & Zhang, 2024)) that have proven effective. We elaborate on these patterns below and, in Sections 3.2 and 5.2, explore specific techniques that leverage them to improve agent performance.", + "bbox": [ + 109, + 357, + 883, + 420 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "- Generator-evaluator pattern. This pattern divides the reasoning capability into two distinct components: a generator and an evaluator (e.g., a verifier or other evaluators like compilers). 
It represents a natural extension of RL-style optimization and has gained popularity since the introduction of RLHF (Ouyang et al., 2022). In this setup, the evaluator functions as the environment, providing feedback on the quality of the agent's actions. Such feedback is particularly valuable for guiding the search for effective actions and improving decision-making. Recent studies have demonstrated that verifiers can significantly enhance the performance and generalization capabilities of agents (Zhang et al., 2024i; Sun et al., 2024c). However, this pattern is not without its challenges. It can suffer from unreliable components and error propagation. For instance, Kim et al. (2024d) points out that verifiers are vulnerable to reward hacking, where the reasoner exploits loopholes in the verifier to achieve higher reward scores, ultimately degrading the overall performance of the agentic system.", + "bbox": [ + 109, + 426, + 880, + 592 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "- Generator-critic-refiner pattern. This pattern divides reasoning capabilities into three components: a reasoner, a critic, and a refiner. The critic acts as the environment, providing feedback—typically in the form of guidance on how to correct errors in the generated actions. The refiner then takes the flawed actions and the critic's feedback as input, producing revised and improved actions. This pattern enables the agentic system to benefit from iterative feedback, making it particularly effective for complex tasks where the initial outputs of the reasoner are suboptimal. However, it may also lead to a phenomenon known as 'over-refinement' (Chen et al., 2024b), where the agent iterates excessively, leading to diminishing returns or even degraded performance rather than improvement. 
Careful design and balancing of the refinement process are essential to mitigate this risk and ensure the pattern's effectiveness.", + "bbox": [ + 109, + 599, + 880, + 737 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "2.3.4 Multi-agent Systems", + "text_level": 1, + "bbox": [ + 112, + 752, + 328, + 767 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In addition to the agent-environment loop in single-agent systems, multi-agent systems introduce an additional agent-agent loop, where multiple agents interact and influence one another. In this framework, agents assume different roles, exchange messages, and collaboratively coordinate their actions while operating within a shared environment.4 Figure 5 shows an example multi-agent system. It involves $N$ agents (often playing distinct roles) and $M$ rounds of communication through message exchanges. The focus is on designing effective communication protocols (e.g., debates) and coordinating the agents' actions to determine a final decision or action within the environment (e.g., employing an additional judge to adjudicate final actions). The following communication patterns have emerged as effective predefined strategies:", + "bbox": [ + 109, + 777, + 880, + 900 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": "4We use message to denote agent-agent communication and action to denote agent-environment interaction.", + "bbox": [ + 130, + 910, + 792, + 924 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg", + "image_caption": [ + "Figure 6: Inference-time and training-time regimes of a reasoning system. We use tree search as an example to illustrate the inference scaling and trajectories collection. 
Given a query, inference scaling relies on extensive inference computation to improve the reasoner's distribution. Specifically, it generates multiple candidate reasoning steps at each layer and selects the best solution to proceed (e.g., by using an external verifier or assembling). In contrast, learning to reason focuses on collecting trajectories and training from the collected data with minimal inference-time computation. It takes all trajectories in the process (identical to those used in inference-scaling, allowing us to reuse the same tree) and labels them with preferences. The preference data can then be used to train the reasoner." + ], + "image_footnote": [], + "bbox": [ + 267, + 99, + 710, + 292 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Debate pattern. In this pattern, two or more agents engage in a debate with each other. The term debate can vary in implementation. For example, in (Wang et al., 2024h), it involves agents addressing the problem independently and incorporating other agents' responses as additional advice. In (Liang et al., 2023b), it means agents approach the problem from opposing perspectives. After the debate, a consensus is reached through mechanisms such as an additional judge, weighted voting, or a fixed number of iterations, ultimately determining the collective action to be taken in the environment.", + "- Reconcile pattern. This pattern facilitates collaborative round-table discussions among agents, enabling them to reach a consensus through mechanisms such as voting or confidence levels. For instance, ReConcile (Chen et al., 2023c) introduce a round-table discussion framework where agents make decisions using a weighted voting system. In this process, each agent assigns a confidence level to its proposed answers, and these confidence levels are used as weights to cast votes, ultimately determining the final decision." 
+ ], + "bbox": [ + 109, + 468, + 883, + 642 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "2.4 Reasoning Regimes", + "text_level": 1, + "bbox": [ + 112, + 678, + 303, + 694 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Orthogonal to the components and architectures discussed above, reasoning systems can operate under distinct computational regimes. Systems employing inference-time computation can refine their outputs through iterative reflection and revision or search for improved solutions by repeatedly sampling the underlying model. However, such systems must balance cost (e.g., computational resources, latency) and effectiveness (e.g., accuracy, reliability) in achieving correct solutions. The learning-to-reason paradigm addresses this tradeoff by shifting computational burdens from inference to training, learning policies from simulated reasoning processes. While both regimes enhance effectiveness by redistributing computational effort across training and inference, they lack the capacity to dynamically adapt resource allocation or method selection to individual problems—a limitation highlighted in recent work (Sprague et al., 2024a; Kapoor et al., 2024; Chen et al., 2024d). To bridge this gap, emerging approaches within the learning-to-reason framework focus on optimizing the reasoning process itself, jointly minimizing cost and maximizing effectiveness. This involves dynamically allocating computational resources, searching for contextually optimal methods, and training models to synergize with adaptive inference-time strategies. 
Figure 6 contrasts these regimes, and we elaborate on each in the sections below.", + "bbox": [ + 109, + 713, + 883, + 924 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/3df501760c731b5b3866e2f732d8ca9368b17b759d192490f7d38cbc2fd05fd1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsInstruction engineeringModify instruction by human-design templateParanjape et al. (2021); Zhou et al. (2023b)
Demonstration engineeringDrawing analogy from relevant experienceWei et al. (2022b); Luo et al. (2024d)
Prompt optimizationSearch for optimized prompt (e.g., bootstrap)Xu et al. (2022); Pryzant et al. (2023)
Optimizing OutputGenerating subtasksDecompose the original task into manageable subtasksDua et al. (2022); Zhou et al. (2023a)
Exploration and searchBranch and explore multiple paths to optimize reasoning trajectoriesYao et al. (2023a); Besta et al. (2024)
", + "bbox": [ + 117, + 101, + 879, + 170 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 2: Summary of inference scaling with standalone LLM.", + "bbox": [ + 276, + 180, + 718, + 196 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2.4.1 Inference Scaling", + "text_level": 1, + "bbox": [ + 112, + 226, + 302, + 242 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Inference scaling techniques enhance reasoning capabilities during test time by increasing the amount of computation performed before generating an answer. These methods can be broadly categorized into three key strategies: (a) Prompt engineering and optimization, which focuses on constructing effective reasoning-provoking prompts through template-based methods, human curation, and automated optimization. (b) Search and planning methods, which include task decomposition, plan generation and verification, and exploration-based approaches. They enable structured multi-step reasoning, often involving backtracking within trees or graphs, to systematically explore potential solutions and verify their validity. (c) System-level enhancements, which incorporates external tools, knowledge sources, and verification mechanisms to augment the model's reasoning capabilities. For standalone LLMs, inference scaling primarily revolves around prompt construction and search strategies. In multi-agent settings, it further extends to include agent-agent communication and coordinated action strategies, enabling collaborative problem-solving. 
While these techniques have demonstrated significant effectiveness in improving reasoning performance without requiring updates to model parameters, they often come with increased computational costs during inference.", + "bbox": [ + 109, + 253, + 883, + 450 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2.4.2 Learning to Reason", + "text_level": 1, + "bbox": [ + 112, + 469, + 316, + 484 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This regime shifts the focus to training models to reason effectively before deployment, often referred to as training-time methods. The core idea is to simulate inference, generating trajectories that capture potential reasoning paths. These trajectories are then used to train the reasoner with online or offline learning methods. The methods include supervised and/or reinforcement learning. While learning-to-reason typically minimizes computational costs during inference, it incurs higher costs during simulation and training. In Section 5, we provide a detailed discussion of methods within this regime across different architectures.", + "bbox": [ + 109, + 496, + 883, + 588 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Recently, this paradigm has evolved to incorporate knowledge of both training and testing methods, enabling adaptive strategies. 
For instance, it now allows for the training of reasoners optimized for known inference techniques (Balashankar et al., 2024), or dynamically distributes computational costs between training and testing, offering a more flexible and efficient framework (Damani et al., 2025; Yue et al., 2025).", + "bbox": [ + 109, + 594, + 883, + 657 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3 Improving Reasoning with Inference Scaling", + "text_level": 1, + "bbox": [ + 112, + 678, + 542, + 696 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Compared to small-scale models, pretrained large-scale language models (LLMs) have demonstrated emergent capabilities (Wei et al., 2022a), such as in-context learning (Dong et al., 2024) and role-playing (Shanahan et al., 2023a), which manifest without additional fine-tuning (i.e., without any gradient updates). Arguably, many of these abilities become apparent only after reaching a certain scale in model size. While scaling model parameters has been shown to improve reasoning performance across various tasks, the returns have diminished due to the high cost of training increasingly larger models. As a result, inference scaling has emerged as an appealing and orthogonal paradigm to unlock reasoning abilities in LLMs by providing additional test-time compute, allowing them to \"think\" before producing a final answer. It has been demonstrated that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it offers better generalization through enhanced flexibility in prompt and workflow design. Such deliberate thinking can be enabled either through training (DeepSeek-AI et al., 2025) or by explicit programming at inference time (OpenAI et al., 2024). In this section, we focus on the latter and defer training-time methods to Section 5. 
We begin with inference scaling methods for standalone LLMs and subsequently extend the discussion to single and multi-agent compound systems.", + "bbox": [ + 109, + 712, + 883, + 925 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3.1 Inference Scaling With Standalone LLM", + "text_level": 1, + "bbox": [ + 112, + 103, + 459, + 119 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this section, we examine the core components and techniques that have made inference-time reasoning methods effective. Many of these methods draw inspiration from research on human cognitive processes on planning, problem solving, and decision-making (Newell et al., 1959; 1972; Stanovich & West, 2000).", + "bbox": [ + 111, + 132, + 883, + 180 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3.1.1 Constructing Reasoning Provoking Prompts", + "text_level": 1, + "bbox": [ + 112, + 198, + 501, + 214 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Although large-scale pre-training endows LLMs with patterns that support reasoning, these capabilities often remain latent under generic prompts. Liu et al. (2025c) demonstrate that deep-reasoning behaviors—such as reflection and self-verification, which signal profound analytical thought—can be amplified simply by increasing the sampling budget. This highlights the importance of designing prompts that deliberately provoke reasoning, thereby surfacing and leveraging the latent human priors within LLMs.", + "bbox": [ + 111, + 226, + 883, + 303 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Instruction engineering Enabling LLMs to reason effectively depends heavily on the quality of the instructions provided (Sclar et al., 2024; Zhuo et al., 2024; Long et al., 2024a). Recognizing this, numerous prompt engineering studies aim to improve LLM reasoning by enhancing instructions. 
Extensive efforts in this direction primarily focus on template-based and human-curated instructions (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022; Si et al., 2023; Long et al., 2024b). With LLMs becoming increasingly adept at following human instructions and generating human-like text, focus has shifted toward leveraging the models themselves to craft and refine high-quality instructions. A notable example of this shift is the Automatic Prompt Engineer (APE) introduced by Zhou et al. (2023b), which uses LLMs to generate high-quality instructions, achieving performance comparable to or surpassing that of human annotators on 31 reasoning tasks. Furthermore, other studies have proposed methods to modify instructions for improved reasoning. For instance, Deng et al. (2023a) and Mekala et al. (2024) present Rephrase-and-Response and EchoPrompt, respectively, two simple yet effective strategies where LLMs are instructed to rephrase queries before answering, significantly enhancing LLM performance on reasoning tasks. Similarly, Tian et al. (2023) introduce R3 prompting, which instructs LLMs to first extract key sentences from noisy contexts, then rephrase the instruction to explicitly include extracted sentences.", + "bbox": [ + 111, + 321, + 883, + 549 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Demonstration engineering Humans can address new problems by drawing analogy from relevant past experience (Holyoak, 2012). Inspired by this, Yasunaga et al. (2024) propose analogical prompting to guide LLMs to self-generate exemplars or knowledge relevant to the given problem as few-shot demonstrations for reasoning, outperforming hand-crafted or retrieved examples. For example, LLMs are prompted to generate a problem on calculating a third-order determinant before solving the given fourth-order determinant. Similarly, Chen et al. (2023d); Yang et al. (2023a); Luo et al. (2024a) highlight the effectiveness of self-generated relevant exemplars. 
Qin et al. (2025) further systematically assess the capability of LLMs to perform analogical reasoning and find that performance is not primarily determined by whether the exemplars are topically relevant to the task. Instead, they show that even exemplars from unrelated domains, such as self-generated biological exemplars, can lead to improved performance, as long as they are accurate and structurally aligned with the reasoning steps required by the target task. This highlights that the quality of the exemplar (its correctness, clarity, and structural usefulness for reasoning) can be the key limiting factor, rather than the relevancy regarding to the topic domain.", + "bbox": [ + 111, + 569, + 883, + 767 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Conventionally, a fixed set of few-shot demonstrations is applied to all queries, which can be suboptimal, especially when queries vary significantly. An alternative approach is to retrieve demonstrations tailored to the current query. Research has shown that retrieval-based demonstration selection significantly improves task performance. The main goals for selecting demonstrations are similarity (Rubin et al., 2022; Agrawal et al., 2023; Li et al., 2023e; Ye et al., 2023a) and diversity (Levy et al., 2023; He et al., 2023; Kim et al., 2024a). Various retrieval strategies have been proposed for selecting $k$ demonstrations, including top- $k$ similarity-based retrieval (Liu et al., 2022; Li et al., 2023e), clustering-based retrieval (Luo et al., 2023c; Wang et al., 2024i), and iterative retrieval (Khattab et al., 2022; Levy et al., 2023; Wang et al., 2024e). 
These methods enable adaptive and effective demonstration selection, enhancing the model's reasoning and generalization across diverse queries.", + "bbox": [ + 111, + 773, + 883, + 925 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In addition, many-shot in-context learning has emerged as a complementary line of work, where hundreds or even thousands of demonstrations are provided to significantly enhance the performance of LLMs, especially on complex reasoning tasks (Li et al., 2023c; Agarwal et al., 2024; Zou et al., 2024; Gu et al., 2025). Many-shot prompting can be seen as an extreme form of demonstration engineering, where the focus is on scaling the quantity of demonstrations to maximize the model's capacity to learn from in-context examples. However, the effectiveness of many-shot ICL is often limited by the high cost of obtaining a large number of labeled demonstrations. To mitigate this gap, Chen et al. (2025) recently introduce MAPLE, a novel influence-based many-shot ICL framework that identifies impactful unlabeled samples, pseudo-labels them by querying LLMs, and adaptively selects them for each test query. This approach effectively enhances many-shot ICL performance with minimal labeling cost, demonstrating improved adaptability and reasoning capabilities of LLMs.", + "bbox": [ + 109, + 103, + 883, + 271 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt optimization Prompt optimization methods, aiming to systematically and strategically optimize prompts for improved performance, have been extensively explored for enhancing LLM reasoning. For instance, Xu et al. (2022) introduce Genetic Prompt Search (GPS), leveraging genetic algorithms to search for the best instruction. Similarly, Guo et al. (2024a) and Fernando et al. (2024) employ evolutionary algorithms to iteratively refine instructions, while Long et al. 
(2024c) introduce a minimax-game framework, inspired by Generative Adversarial Networks (Goodfellow et al., 2014) to simultaneously optimize instructions and demonstrations. Furthermore, Pryzant et al. (2023) present the concept of \"text gradients\" which leverage feedback from prompt executions and LLMs to update prompts, akin to Optimization by PROmpting (OPRO) (Yang et al., 2024c), which uses execution feedback. Despite these advances, the interplay between various prompt optimization algorithms remains underexplored. Recently, Wan et al. (2024a) conducted a comprehensive evaluation of representative techniques for instruction and demonstration optimization, examining their effectiveness in isolation and combination across a range of challenging tasks. Their findings indicate that intelligently reusing samples from prompt evaluations as demonstrations consistently enhances performance, that demonstration selection strategies can have a greater impact than instruction optimization techniques, and that a synergistic combination of demonstration and instruction optimization can outperform their individual contributions.", + "bbox": [ + 109, + 287, + 883, + 532 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "3.1.2 Optimizing Reasoning Output with Search and Planning", + "text_level": 1, + "bbox": [ + 112, + 550, + 591, + 566 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Generating reasoning subtasks Human problem-solving often involves planning manageable steps that lead to a successful resolution (Dostál, 2015). Likewise, improving LLM reasoning by breaking down complex problems into intermediate steps has become a successful paradigm. In this context, subtasks refer to the decomposed parts of a problem, structures are the frameworks guiding the reasoning process, and intermediate steps are intermediate results produced at each stage of problem-solving. Nye et al. (2021) and Wei et al. 
(2022b) pioneer this direction by proposing Chain-of-Thought (CoT) prompting which uses a few demonstrations with human-written intermediate steps to guide the model in solving complex problems in a similar style. Kojima et al. (2022) further simplified this approach by introducing zero-shot CoT prompting, which eliminates the need for demonstrations by instructing models to \"think step by step\" before answering.", + "bbox": [ + 109, + 577, + 883, + 715 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Simple CoT prompting often struggles as task complexity increases, particularly when the task surpasses the complexity of the provided demonstrations. To address this, researchers have proposed methods that explicitly guide models in decomposing tasks into subtasks, thereby enhancing intermediate step reasoning. Dua et al. (2022) propose an iterative approach, where tasks are progressively broken down into simpler subtasks and solved step-by-step. Similarly, Zhou et al. (2023a); Khot et al. (2023) and Suzgun & Kalai (2024a) advocate for a \"divide-and-conquer\" strategy, where tasks are first divided into subtasks and then solved sequentially.", + "bbox": [ + 109, + 720, + 880, + 828 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Beyond subtasks, researchers emphasize the importance of robust reasoning structures such as hierarchical and decision-making processes that capture the underlying mechanisms involved in problem-solving. Zhou et al. (2024b) introduce Self-Discover, a framework that enables models to self-identify reasoning structures for any task using a seed set of general reasoning skill modules. Building on this, Aswani et al. (2024) propose Auto-Evolve, which dynamically adapts reasoning modules to accommodate more diverse problems. 
In addition to designing better reasoning steps, several studies address the need to correct intermediate", + "bbox": [ + 109, + 833, + 883, + 925 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/b56b161b40385ab764234fe1b8b74a6302e624a8559602cb7e9c4810b1a0ed83.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
Feedback RefinementVerifier and ReflectionUse verifiers to select, modify, or refine actionsSnell et al. (2025); Madaan et al. (2023b)
Action EnhancementRetrieval and ToolAccess external knowledge and specialized resourcesLi et al. (2024e); Ma et al. (2024a)
", + "bbox": [ + 117, + 101, + 879, + 143 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 3: Summary of inference scaling with single-agent system", + "bbox": [ + 267, + 155, + 725, + 170 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "steps. For example, Deng et al. (2024a); Yan et al. (2024) and Wu et al. (2024b) propose methods to refine intermediate outputs. Notably, Zhang et al. (2024i) observe that smaller models ( $\\leq 13\\mathrm{B}$ parameters) in particular need stronger models acting as verifiers to validate and correct intermediate steps.", + "bbox": [ + 109, + 198, + 883, + 244 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Exploration and search Research on human problem-solving reveals that complex reasoning tasks often admit multiple valid paths to reach a correct solution (Stanovich & West, 2000). Compared to linear reasoning structures like chain-of-thought, approaches that incorporate exploration during problem-solving have shown significant improvements for complex reasoning tasks. Unlike task decomposition methods (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023), exploration-based approaches employ dynamic search through multiple possible reasoning paths simultaneously rather than following certain decomposition patterns, enabling models to explore ambiguous solution strategies for complex problems. Exploration typically involves two key components: branching and aggregation. Due to the stochastic nature of language model decoding, branching is often implemented through independent re-sampling with non-zero temperature, generating diverse reasoning chains. Early methods, such as self-consistency (Wang et al., 2023f), introduced branching only at the beginning of the reasoning chain, conditioned on the initial query. 
While simple, this approach lacks local exploration of intermediate reasoning steps, has limited applicability for tasks with multiple valid answers, and produces reasoning chains with restricted diversity (Chen et al., 2024d). More recent advancements, such as Tree-of-Thoughts (Yao et al., 2023a), Graph-of-Thoughts (Besta et al., 2024), and Forest-of-Thoughts (Bi et al., 2024), enable finer-grained branching by considering both the query and a history of previous thoughts or thought-state sequences, allowing for more nuanced and flexible exploration.", + "bbox": [ + 114, + 258, + 883, + 501 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The effectiveness of branched reasoning paths with thoughts or answers depends on aggregation or evaluation strategies. Recent progress is centered around two categories: ensemble-based methods and verifier-based methods. Ensemble-based methods have been widely employed due to their simplicity and self-contained nature, requiring no external knowledge or sources for validation. These approaches typically employ strategies such as majority voting across answer tokens (Wang et al., 2023f; 2024a; Li et al., 2024b) or confidence-based selection (Wang & Zhou, 2024). Verifier-based methods, in contrast, employ external verifiers or judges to score and select preferred answers among candidate solutions.", + "bbox": [ + 109, + 508, + 883, + 614 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "3.2 Inference Scaling With Single-agent System", + "text_level": 1, + "bbox": [ + 112, + 632, + 486, + 648 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "LLMs are trained on static, finite datasets, which inherently limits their parametric knowledge. This limitation hinders their ability to reason effectively in scenarios requiring up-to-date or highly specialized knowledge. 
The use of an agentic system, where LLMs are augmented with external verifiers, retrieval and tool integration, has proven effective in such scenarios. Verifiers provide reasoners with a signal of the quality of their outputs (e.g., a score or natural language feedback), which may be used by reasoners to modify or improve their outputs. Retrieval augmentation improves reasoning by enabling the agent to access relevant external knowledge, thereby reducing hallucinations and ensuring more accurate, fact-based responses. Additionally, the agent can achieve higher performance by leveraging specialized external tools to handle specific intermediate reasoning steps. For instance, allowing an agent to use a calculator can minimize errors stemming from inaccuracies in numerical generation.", + "bbox": [ + 109, + 659, + 883, + 811 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A pioneering approach in this domain is the ReAct framework (Yao et al., 2023b), which interleaves reasoning and acting by prompting LLMs to generate both reasoning traces and task-specific actions in an interleaved manner. This synergy allows the model to induce, track, and update action plans while interfacing with external sources (environment) to gather additional information. ReAct has demonstrated effectiveness across QA and interactive decision-making tasks. Building upon ReAct, LATS (Zhou et al., 2024a) unifies reasoning, acting, and planning within LLMs. By combining Monte Carlo Tree Search with ReAct, LATS enables structured search over a combinatorial space of reasoning and acting paths. 
More recently, Liu et al.", + "bbox": [ + 109, + 818, + 883, + 925 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "(2024f) formalize reasoning and acting with LLMs under a Bayesian adaptive MDP and propose RAFA, a theoretically grounded framework for orchestrating the reasoning and acting of LLMs.", + "bbox": [ + 112, + 103, + 883, + 133 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3.2.1 Refinement with Verifiers and Reflections", + "text_level": 1, + "bbox": [ + 112, + 148, + 482, + 164 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A natural basis for modifying agent actions is the quality of their generated outputs—if the output is incorrect, the agent should attempt to correct it. However, ground-truth references are typically unavailable to the agent at test time. In such scenarios, agents often rely on verifiers, which are models or systems that provide an approximate measure of correctness, to guide action modifications. A special case arises when the verifier has access to ground-truth outcomes. Oracle verifiers (First et al., 2023; Xin et al., 2024a), which leverage correct answers, have shown significant performance improvements over baselines without verifiers (Huang et al., 2024a; Brown et al., 2024). However, their applicability is limited to scenarios where ground-truth data is readily available or easily accessible, such as in games or structured environments.", + "bbox": [ + 111, + 174, + 883, + 297 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In contrast, non-oracle (or imperfect) verifiers provide a more widely applicable solution. Their form varies depending on the task and knowledge source. For instance, Cobbe et al. (2021); Feng et al. (2023b); Snell et al. (2025) employ trained outcome reward models (ORMs) as verifiers to rerank responses. 
For more granular evaluation, Lightman et al. (2024) and Zhang et al. (2025b) train process reward models (PRMs) to serve as inference-time verifiers. By enabling the reward model to assess each reasoning step individually, PRMs generally yield greater improvements during inference compared to ORMs (Uesato et al., 2022; Tian et al., 2024).", + "bbox": [ + 111, + 301, + 883, + 409 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While reward models provide actionable signals about the quality of model responses, they are non-generative verifiers. As a result, they are unsuitable for verification approaches that require natural language feedback. For instance, synthesizing unit tests (Chen et al., 2023b; Hassid et al., 2024; Kapoor et al., 2024; Cook et al., 2024), commonly used in code generation tasks, necessitates verifiers capable of generating natural language. Broadly, generative verifiers are referred to as either critique models or LLM-as-judge models. In both cases, LLMs are either prompted or fine-tuned specifically for critique and evaluation. These models have been employed not only for output reranking (Vu et al., 2024) but also for providing valuable natural language feedback (Shinn et al., 2024; Shridhar et al., 2024; McAleese et al., 2024). However, recent studies have found that LLM-as-judge models generally underperform reward models (RMs) in terms of verification (Zhang et al., 2024e). 
To address this, researchers have sought to combine the strengths of both approaches under the Generative RM framework (Zhang et al., 2024e; Mahan et al., 2024; Liu et al., 2025b), aiming to unify the advantages of generative feedback with the precision of reward-based evaluation.", + "bbox": [ + 111, + 415, + 883, + 597 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Self-reflection or self-refinement approaches (Saunders et al., 2022; Madaan et al., 2024) aim to eliminate the need for additional, specialized verifier models by enabling the agent to critique and refine its own outputs. While some studies (Saunders et al., 2022; Madaan et al., 2024) have demonstrated empirical success, others highlight poor performance in the absence of robust verifiers (Stechly et al., 2023; Huang et al., 2024a; Stechly et al., 2024; Valmeekam et al., 2023; Shridhar et al., 2024). For a comprehensive review of recent advancements, see (Pan et al., 2024b).", + "bbox": [ + 111, + 604, + 883, + 696 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While verification methods can be deployed across a wider range of domains, they are susceptible to false positives—incorrect solutions that nevertheless pass verification. This limitation becomes particularly relevant when scaling up inference compute, as it can lead to diminishing returns on computational investment. Interested readers can refer to (Stroebl et al., 2024) for a comprehensive analysis of these trade-offs.", + "bbox": [ + 111, + 702, + 882, + 763 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3.2.2 Enhancement through Retrieval and Tool Utilization", + "text_level": 1, + "bbox": [ + 112, + 777, + 565, + 792 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "During the reasoning process, agents can retrieve external knowledge to refine their internal state representations, resulting in more accurate reasoning steps. 
The advantages of retrieval are particularly pronounced in knowledge-intensive tasks that demand multi-hop and long-horizon reasoning, where connecting multiple pieces of information is essential to arrive at a final answer. Through retrieval, agents can access intermediate information, verify connections between data points, and integrate them into their reasoning process (Shi et al., 2024; Jiang et al., 2024b; Wang et al., 2024m). Retrieval also addresses critical flaws in LLMs, such as hallucination and factual inaccuracies. By grounding responses in retrieved facts, models are less prone to generating erroneous information and more likely to produce reliable and trustworthy outputs. For", + "bbox": [ + 111, + 804, + 883, + 925 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/acc497e70ddf56ff5155272a2c39df1404b99cdd1fd1aff432306a10918635ad.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
DesigningDecentralizedNo hierarchy among agentsChen et al. (2023c); Chang (2024)
CommunicationCentralizedPresence of a central lead agentSuzgun & Kalai (2024a); Pan et al. (2024a)
ActionConditioned generationPerform reasoning based on other agents' outputsWang et al. (2024c); Gao et al. (2024b)
CoordinationDynamic adaptationAdapt actions based on specific tasksFourney et al. (2024); Yuan et al. (2024c)
", + "bbox": [ + 119, + 101, + 879, + 170 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 4: Summary of inference scaling in multi-agent systems.", + "bbox": [ + 272, + 179, + 722, + 195 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "instance, frameworks such as Verify-and-Edit (Zhao et al., 2023) and Chain-of-Knowledge (Li et al., 2024e) dynamically incorporate structured and unstructured knowledge sources to revise and correct intermediate reasoning steps within a reasoning chain. CRP-RAG (Xu et al., 2024b) improves multi-hop reasoning by dynamically adjusting reasoning paths and aggregating relevant knowledge. SelfRewardRAG (Hammane et al., 2024) enhances medical reasoning by combining RAG with self-evaluation, dynamically retrieving and synthesizing up-to-date medical information to ensure accurate response generation. By leveraging real-time data, such as clinical records from PubMed, it ensures responses are both current and precise. Another example is Think-on-Graph (Sun et al., 2023), a retrieval framework that integrates knowledge graphs (KGs) and text retrieval to deepen and refine reasoning in LLMs. GRATR (Zhu et al., 2024b) applies RAG techniques to enhance reasoning in multiplayer games with incomplete information.", + "bbox": [ + 109, + 222, + 883, + 375 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In addition to search and retrieval, agents can utilize other specialized tools to overcome their inherent limitations and significantly enhance reasoning performance. By integrating tools such as calculators, compilers, calendars, or specialized APIs, agents can access domain-specific resources, enabling them to operate more effectively in targeted applications (Yu et al., 2023b; Lu et al., 2024a; Li et al., 2025a). For instance, SCIAGENT (Ma et al., 2024b) leverages domain-specific tools like SymPy and WolframAlpha to enhance the reasoning capabilities of LLMs in scientific domains. 
Similarly, FinAgent (Zhang et al., 2024g) combines textual, numerical, and visual tools to improve performance in financial trading tasks.", + "bbox": [ + 109, + 381, + 883, + 488 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Moreover, external tools provide precise computational capabilities, allowing LLMs to transcend their limitations and perform complex numerical tasks with higher accuracy (Chen et al., 2023e; Li et al., 2023a). For example, MATHSENSEI (Das et al., 2024) employs tools such as Python, WolframAlpha, and Bing Search to tackle mathematical reasoning tasks across disciplines like algebra and calculus. TART (Lu et al., 2024b) integrates LLMs with tools for precise table-based reasoning tasks, such as table question answering and fact verification.", + "bbox": [ + 109, + 494, + 883, + 585 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Moreover, Anthropic introduced an open standard of Model Context Protocol (MCP) to seamlessly connect AI assistants with real-world data sources such as content repositories, business tools, and development environments. It provides a universal, scalable way for developers to create secure, two-way connections between AI tools and diverse data systems. While MCP holds significant promise, its adoption also introduces several challenges that must be addressed to support sustainable growth and responsible development. Hou et al. 
(2025) discussed some key issues, such as the absence of centralized security oversight, gaps in authentication and authorization, and difficulties in maintaining consistency across multi-step, cross-system workflows.", + "bbox": [ + 109, + 592, + 883, + 700 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3.3 Inference Scaling With Multi-agent Systems", + "text_level": 1, + "bbox": [ + 109, + 717, + 488, + 734 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "By strategically designing communication patterns and coordinating actions, multi-agent systems can achieve more sophisticated reasoning by harnessing the specialized capabilities of multiple agents (Guo et al., 2024b). Effective communication design involves establishing structured message exchanges and interaction patterns among agents, while action coordination focuses on reconciling diverse outputs and achieving consensus to determine the final action in the environment.", + "bbox": [ + 109, + 744, + 883, + 821 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "3.3.1 Designing Communication Patterns", + "text_level": 1, + "bbox": [ + 109, + 838, + 437, + 854 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A common communication pattern in multi-agent frameworks involves engaging multiple agents in debates or discussions (Liang et al., 2023b). For instance, the RECONCILE framework (Chen et al., 2023c) requires each agent to generate an answer accompanied by an explanation and a confidence score. The agents then participate in multi-round discussions to refine their responses, and a confidence-weighted voting mechanism", + "bbox": [ + 109, + 864, + 883, + 926 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "aggregates the answers into a consensus. 
Similarly, SocraSynth (Chang, 2024) employs opposing LLM agents moderated by predefined contentiousness levels to explore diverse perspectives. Additionally, GroupDebate (Liu et al., 2024e) organizes agents into groups that conduct internal debates before sharing their results, reducing token costs while maintaining robust logical reasoning capabilities.",
+ "bbox": [
+ 109,
+ 103,
+ 883,
+ 165
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "Besides decentralized communication, prior works also consider sending messages to a central node for decision making. For example, Suzgun & Kalai (2024b) employs a language model as a multi-faceted conductor that is good at handling and integrating various queries. Moreover, AgentCoord (Pan et al., 2024a) assigns an LLM the role of a central planner for coordination strategy generation and agent assignment. Compared with decentralized communication, it can lead to more efficient resource allocation but increase the system vulnerability to potential failure of the central node.",
+ "bbox": [
+ 109,
+ 171,
+ 883,
+ 263
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "3.3.2 Coordinating Action",
+ "text_level": 1,
+ "bbox": [
+ 112,
+ 277,
+ 323,
+ 292
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "Effective action coordination among multiple agents is important for achieving the shared goals, especially given a dynamic and complex environment. Prior works explore various strategies which can enable agents to synergise agents' actions and optimize overall system reasoning and problem-solving performance. This approach leverages the strengths of different LLMs to overcome the limitations of individual models.",
+ "bbox": [
+ 109,
+ 301,
+ 883,
+ 364
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "One straightforward coordination strategy is chaining agents in a row, where agents can perform reasoning based on other agents' outputs. 
For example, Mixture-of-Agents (MoA) (Wang et al., 2024c) capitalizes on the cooperative nature of LLMs, allowing models to generate higher-quality responses by integrating and synthesizing contributions from multiple agents, achieving state-of-the-art performance. Similarly, Meta-Reasoning Prompting (MRP) (Gao et al., 2024b) assigns each agent to dynamically select the most effective reasoning method from a reasoning pool for a specific task, enabling the integration of diverse strategies to efficiently address multiple tasks. In addition, CoMM (Chen et al., 2024c) makes agents respond to discussions based on different role-playings.", + "bbox": [ + 109, + 371, + 883, + 492 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Moreover, coordination action can incorporate dynamic adaptation to task requirements. For example, Magentic-One (Fourney et al., 2024) introduces a lead agent as Orchestrator to conduct dynamic planning based on varied tasks. Gabriel et al. (2024) proposes a framework that deals with multi-hop queries, produces and executes task graphs, chooses suitable tools, and dynamically adapts to real-time changes. Additionally, EVOAGENT (Yuan et al., 2024c) dynamically generates various agents suitable for the given task and select those with high-quality outputs for result generation.", + "bbox": [ + 109, + 500, + 883, + 590 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4 Learning Algorithms", + "text_level": 1, + "bbox": [ + 112, + 608, + 331, + 627 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Before delving into methodologies for training reasoning models, we first describe the foundational learning algorithms used to train the reasoner's policy and verifiers. These algorithms are defined by their precise loss functions. Note that learning algorithms are independent of the data curation process, which will be discussed in detail in Section 5. 
We begin by presenting commonly used learning algorithms for training reasoning models in Section 4.1, followed by a discussion on training verifiers in Section 4.2.", + "bbox": [ + 109, + 641, + 883, + 717 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4.1 Learning of Reasoner", + "text_level": 1, + "bbox": [ + 112, + 733, + 316, + 750 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This section is organized into three key parts: (1) imitation learning through supervised fine-tuning, (2) reinforcement learning, and (3) preference learning.", + "bbox": [ + 109, + 760, + 883, + 792 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4.1.1 Imitation Learning - Supervised Fine-tuning", + "text_level": 1, + "bbox": [ + 109, + 805, + 500, + 821 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Supervised fine-tuning (SFT) maximizes the log probabilities of the next token $y_{i}$ given the input prompt $x$ and previously generated tokens $y_{< i}$ . Training the policy model $\\pi_{\\theta}$ generally includes the steps to minimize the following loss function:", + "bbox": [ + 109, + 830, + 883, + 878 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {S F T}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ \\sum_ {i} ^ {T} - \\frac {1}{T} \\log \\left(\\pi_ {\\theta} \\left(y _ {i} \\mid y _ {< i}, x\\right)\\right) \\right], \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 886, + 883, + 929 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 946, + 508, + 960 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where $\\mathcal{D}$ is the SFT dataset that comprises inputs $x$ and ground truth labels $y$ . The ground truth labels can be either human-written or AI-generated reasoning process and answer response. 
The loss is equivalent to the next token prediction objective where the prompt input tokens are masked out and do not contribute to the loss. SFT is often the default first (or only) step to train a base LLM to produce reasoning chains in zero-shot settings. SFT has also been popularly used as an effective way to train smaller LLMs to imitate outputs generated by larger, more powerful LLMs, in a process known as knowledge distillation (Xu et al., 2024c).",
+ "bbox": [
+ 109,
+ 103,
+ 883,
+ 196
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "4.1.2 Reinforcement Learning for Reasoning",
+ "text_level": 1,
+ "bbox": [
+ 111,
+ 209,
+ 460,
+ 224
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Stiennon et al. (2020) and Ouyang et al. (2022) pioneered the application of reinforcement learning (RL), particularly proximal policy optimization (PPO) (Schulman et al., 2017), to improve not only reasoning capabilities but also the helpfulness and harmlessness of LLMs. Their work catalyzed a wave of innovations in preference learning and RL-based optimization techniques, as evidenced by subsequent studies (Rafailov et al., 2023; Ahmadian et al., 2024; OpenAI et al., 2024; DeepSeek-AI et al., 2025; Ramesh et al., 2024).",
+ "bbox": [
+ 109,
+ 234,
+ 883,
+ 313
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "text",
+ "text": "Markov decision process. Most reinforcement learning (RL) approaches model text generation as a Markov Decision Process (MDP). 
In this framework, the process is defined by the following components:", + "bbox": [ + 111, + 325, + 883, + 357 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A set of states $S$", + "- A set of actions $\\mathcal{A}$ ,", + "- A state-action transition distribution $P(s_{t + 1}|s_t,a_t)$ controlled by the environment,", + "- A reward function $R(s_{t},a_{t})\\in \\mathbb{R}$ that provides a scalar reward, and", + "- A policy $\\pi (a_t|s_t)$ , which determines the actions to take based on the current state." + ], + "bbox": [ + 114, + 373, + 725, + 486 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "At each time step $t$ , for a given state $s_t \\in S$ , the agent selects an action $a_t$ and transitions to a new state $s_{t+1}$ , receiving a reward $R(s_t, a_t)$ from the environment. The set of available actions at state $s_t$ may be restricted to a subset of $\\mathcal{A}$ , denoted $\\mathcal{A}_{s_t}$ (i.e., $a_t \\in \\mathcal{A}_{s_t}$ ). In the context of autoregressive language modeling with LLMs, generally the next token depends on all the previous tokens. As such, in order to apply RL training for LLMs, one needs to define the states and actions of the problem such that they both satisfy the temporal dependency constraint of the language modeling task as well as the Markov property. One common approach is to define that the current state $s_t$ fully encapsulates all relevant information about the environment, in other words all previous tokens. This means the next state $s_{t+1}$ depends solely on the current state $s_t \\in S$ and the chosen action $a_t \\in \\mathcal{A}_{s_t}$ . In this way, the current state no longer needs to retrieve information from the previous states to decide the next action. As such, the state transition is agnostic to the history or previous states and actions. 
Within this MDP framework, the goal of RL is to learn a policy model that selects optimal actions by maximizing the expected cumulative rewards (Eq. 1).", + "bbox": [ + 109, + 505, + 883, + 688 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Action := token: Actions are defined at the token level, making the action space $\\mathcal{A}_{s_t}$ is finite and equal in size to the vocabulary. The state $s_t$ consists of all preceding tokens, including the input prompt and previously generated output tokens. The next state $s_{t+1}$ is defined as the concatenation of the current state $s_t$ and the action taken $a_t$ , i.e., $s_{t+1} \\coloneqq [s_t; a_t]$ . This category of methods defines rewards and related measures, such as values and advantages, at the token level. Works adopting this approach include most standard RLHF methods (Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023) as well as more recent fine-grained process-rewarding approaches (Yuan et al., 2024b; Cui et al., 2025).", + "- **Action := token chunk (step):** In this category of methods, actions are defined at the level of token chunks that semantically represent a reasoning step, separated by a special delimiter. As a result, the action space is infinite. The state $s_t$ consists of the prompt and the output tokens generated in previous reasoning steps. Rewards, value scores, and advantages are computed at the step level, with all tokens within a reasoning step $a_t$ sharing the same step-level score. This approach is particularly prominent in process supervision pipelines, as exemplified by DeepSeek-Math and VinePPO (Shao et al., 2024; Kazemnejad et al., 2024)." 
+ ], + "bbox": [ + 114, + 703, + 883, + 925 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/d2d3ae2651fba2b47af954ed5cd41fbafa1fee7fb129a2b951985ad6c1721ac0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeState stAction atAction spaceExample work
Action := tokenAll previous to-kens (prompt and current response tokens)one tokenfinite, vocabulary size(Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023)
Action := stepAll previous tokens of prompt and previous stepsa chunk of tokens representing a “reasoning step”, separated by a special delimiterinfinite(Shao et al., 2024) (process supervision), (Kazemnejad et al., 2024)
Action := full re-sponsePromptentire responseinfinite(Shao et al., 2024) (outcome supervision), (DeepSeek-AI et al., 2025)
", + "bbox": [ + 133, + 99, + 866, + 306 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 5: Definitions of MDP states and actions across different training schemes.", + "bbox": [ + 205, + 316, + 789, + 332 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Action := full response: In this category, the entire response—comprising all output tokens—is treated as a single action. This transforms the reasoning problem into a one-step MDP with an infinite action space. This approach has been recently popularized by DeepSeek-R1 (DeepSeek-AI et al., 2025) and previously by DeepSeek-Math (outcome supervision) (Shao et al., 2024). A unique aspect of this formulation is that the full response may semantically include multiple reasoning steps, such as spontaneous backtracking and self-evaluation behaviors, as observed in DeepSeek-R1 (DeepSeek-AI et al., 2025). Regardless of the number of humanly recognizable reasoning steps within the response, the entire output is still considered a single action. To assign token-level value scores, rewards, and advantages, Shao et al. (2024); DeepSeek-AI et al. (2025) compute these values based on the full response $a_{t}$ and then distribute them uniformly across all tokens, similar to the step-level action setting. This formulation aligns with the concept of \"bandit\" prediction (with infinite action space) in REINFORCE-style RL (Nguyen et al., 2017; Kreutzer et al., 2017).", + "bbox": [ + 114, + 359, + 883, + 542 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Proximal Policy Optimization (PPO). As one of the primary variants of policy gradient methods, PPO has remained a popular and widely used RL algorithm (Schulman et al., 2017). To train the policy $\\pi_{\\theta}$ , PPO utilizes two additional models: the reference model $\\pi_{\\theta_{\\mathrm{ref}}}$ , which represents the initial state of the policy, and the value model $V$ , which estimates the state value $V(s_{t})$ . 
PPO begins by sampling a state-action trajectory $\\tau$ with consecutive state-action pairs $s_{t+1} \\sim (s_{t}, a_{t})$ , then collects the respective intermediate or process reward (if available) and final (outcome) reward. Then, it computes the advantage $A(s_{t}, a_{t})$ of each action $a_{t}$ given the current state $s_{t}$ , which is defined as the relative strength of that specific action $a_{t}$ compared to the probability-weighted actions that the policy could probably have taken from $s_{t}$ . The advantage is formulated as", + "bbox": [ + 109, + 563, + 883, + 700 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nA \\left(s _ {t}, a _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - V \\left(s _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - \\mathbb {E} _ {a _ {t} ^ {\\prime}} \\left[ Q \\left(s _ {t}, a _ {t} ^ {\\prime}\\right) \\right], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 289, + 712, + 883, + 729 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where $Q(s_{t},a_{t})$ represents the expected cumulative total reward that the policy is expected to obtain if it takes action $a_{t}$ from $s_{t}$ and continue to follow the current policy, while $V(s_{t})$ denotes the expected total rewards obtainable from state $s_{t}$ , known as the state value. The state value is equivalent to the expected value of $Q(s_{t},a_{t}^{\\prime})$ marginalized over all probable actions the current policy $\\pi_{\\theta}$ may take from $s_{t}$ . If $A(s_{t},a_{t}) > 0$ , the action $a_{t}$ is encouraged, conversely, if $A(s_{t},a_{t}) < 0$ , the action $a_{t}$ is discouraged. 
After computing the advantages, PPO optimizes the policy $\\pi_{\\theta}$ according to the following loss function.", + "bbox": [ + 109, + 742, + 883, + 833 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {P P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {0}}, P} - \\frac {1}{T} \\left[ \\sum_ {t = 0} ^ {T} \\operatorname {m i n} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})} A (s _ {t}, a _ {t}), \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})}, 1 - \\epsilon , 1 + \\epsilon\\right) A (s _ {t}, a _ {t})\\right) \\right], \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 133, + 845, + 883, + 887 + ], + "page_idx": 20 + }, + { + "type": "page_footnote", + "text": "5The O-1 model series (OpenAI et al., 2024) also exhibit such behaviors, though the training approach for O-1 remains undisclosed.", + "bbox": [ + 112, + 898, + 883, + 922 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "where $t \\in [0, T]$ is a time step within trajectory $\\tau$ , $\\pi_{\\theta_o}$ is the fixed policy from previous episode or iteration, and $P$ is the transition distribution. The clip function, applied to the probability ratio $\\frac{\\pi_{\\theta}(a_t|s_t)}{\\pi_{\\theta_o}(a_t|s_t)}$ , ensures that the policy does not deviate too drastically or rapidly from its previous version. This also helps prevent catastrophic failure or suboptimal local solutions. Additionally, a KL divergence term $\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta}||\\pi_{\\theta_{\\mathrm{ref}}})$ is often incorporated into the loss function to constrain exploration during the later stages of training. 
$\\pi_{\\theta_{\\mathrm{ref}}}$ is often a fixed initial reference policy that we do not want our policy to deviate too much from, while $\\pi_{\\theta_o}$ is a snapshot of the current policy from the previous iteration which is updated regularly. Throughout the training process, both the policy $\\pi_{\\theta}$ and value model $V$ are iteratively updated.", + "bbox": [ + 109, + 103, + 885, + 232 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "REINFORCE & RLOO. REINFORCE is another popular policy gradient method (Sutton, 2018; Williams, 1992; Nguyen et al., 2017; Kreutzer et al., 2017) for RL. This method seeks to optimize the reward weighted objective of the entire response as:", + "bbox": [ + 109, + 243, + 885, + 292 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {R E I N F O R C E}} (\\theta) = \\mathbb {E} _ {x \\sim \\mathcal {D}, y \\sim \\pi_ {\\theta} (\\cdot | x)} [ (R (y, x) - b) \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y | x) ] \\qquad (5)\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 297, + 883, + 316 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $R(y, x)$ represents the final reward for output $y$ given input $x$ and $b$ is a baseline term introduced to reduce the variance of the gradient estimates. A widely used choice for $b$ is the moving average of all rewards observed during training (Williams, 1992; Ahmadian et al., 2024).", + "bbox": [ + 109, + 323, + 885, + 371 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Recently, the REINFORCE Leave-One-Out (RLOO) method (Kool et al., 2019; Ahmadian et al., 2024) has been proposed, which replaces the traditional baseline calculation with the leave-one-out average of trajectory rewards obtained through Monte Carlo (MC) sampling, as shown in Eq. 
6", + "bbox": [ + 109, + 376, + 885, + 422 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {R L O O}} (\\theta) = \\frac {1}{k} \\sum_ {i = 1} ^ {k} [ R (y _ {i}, x) - \\frac {1}{k - 1} \\sum_ {j \\neq i} R (y _ {j}, x) ] \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y _ {i} | x) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 269, + 433, + 885, + 476 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $k$ denotes the number of Monte Carlo samples. Unlike PPO, these algorithms do not rely on a parameterized value function (critic model) and instead depend solely on observed rewards. These methods share similarities with approaches such as Group-Relative Policy Optimization (GRPO) (Ramesh et al., 2024) and VinePPO (Kazemnejad et al., 2024), which will be discussed in detail below.", + "bbox": [ + 109, + 482, + 888, + 544 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Group-Relative Policy Optimization (GRPO). This algorithm has gained recent popularity through DeepSeek-R1 DeepSeek-AI et al. (2025), though it was also explored in earlier studies such as (Shao et al., 2024; Yang et al., 2024b;a; Team, 2024). It employs the same clipped surrogate objective as PPO, defined in Eq. 4 (Schulman et al., 2017). However, unlike PPO, which uses a parameterized value model to estimate the advantage $A(s_{t},a_{t})$ , this approach samples a group $G = [o_{1},o_{2},\\dots,o_{g}]$ of Monte-Carlo outputs for a given input $x$ . It then computes the corresponding rewards $R = [r_1,r_2,\\dots,r_g]$ , and determines the advantage of each output $o_i$ as the group-normalized reward", + "bbox": [ + 109, + 558, + 887, + 667 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\nA _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}) = A _ {\\mathrm {G R P O}} (o _ {i}) = \\frac {r _ {i} - m e a n (R)}{s t d (R)}. 
\\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 672, + 883, + 705 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Then, the algorithm optimizes the policy $\\pi_{\\theta}$ by minimizing the following loss function.", + "bbox": [ + 112, + 712, + 736, + 729 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} L _ {\\mathrm {G R P O}} (\\theta) = - \\frac {1}{| G |} \\sum_ {i} ^ {| G |} \\frac {1}{T _ {i}} \\sum_ {t} ^ {T _ {i}} m i n \\left\\{\\frac {\\pi_ {\\theta} (a _ {i , t} | s _ {i , t})}{\\pi_ {\\theta_ {o}} (a _ {i , t} | s _ {i , t})} A _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}), \\right. \\\\ \\left. \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}{\\pi_ {\\theta_ {o}} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {\\mathrm {G R P O}} \\left(s _ {i, t}, a _ {i, t}\\right) \\right\\} \\tag {8} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 737, + 883, + 816 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Variants of GRPO, such as DAPO (Yu et al., 2025), have also been introduced to alleviate issues with GRPO like length bias and inappropriate penalties for responses that exceed the context length.", + "bbox": [ + 109, + 821, + 885, + 854 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "4.1.3 Preference Learning", + "text_level": 1, + "bbox": [ + 112, + 868, + 323, + 883 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Preference learning, particularly learning from human feedback, is a widely used post-pretraining alignment stage for LLMs. 
Its goal is to encourage the generation of responses that align with human preferences or", + "bbox": [ + 109, + 893, + 885, + 926 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "desired values, such as helpfulness or harmlessness (Ouyang et al., 2022; Bai et al., 2022; Ganguli et al., 2022). The data collection process for this stage typically involves prompting an unaligned LLM to generate multiple responses for a given input. Human annotators are then presented with pairs of responses and asked to select the preferred one. The resulting preference dataset is used to train a reward model. This reward model subsequently provides online reward scores for policy trajectories during PPO training, a process commonly referred to as reinforcement learning from human feedback or RLHF (Schulman et al., 2017; Ouyang et al., 2022; Touvron et al., 2023), as well as AI feedback (Lee et al., 2023).", + "bbox": [ + 109, + 103, + 883, + 209 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Preference learning has evolved beyond conventional reinforcement learning (RL)-based methodologies with the introduction of Direct Preference Optimization (DPO) (Rafailov et al., 2023) and its subsequent variants (Ethayarajh et al., 2024; Lai et al., 2024; Hong et al., 2024; Saeidi et al., 2024; Meng et al., 2024; Azar et al., 2024). DPO proposes using the policy language model itself to directly model human reward preferences from the preference dataset. This formulation eliminates the need for a separately trained reward model, instead optimizing the policy on the preference dataset with a simple binary classification loss. 
Formally, the policy $\\pi_{\\theta}$ is optimized using a preference dataset $\\mathcal{D}$ by minimizing the loss function:", + "bbox": [ + 109, + 215, + 883, + 323 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\mathrm {D P O}} (\\theta) = - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 245, + 327, + 883, + 362 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where $y_{w}$ and $y_{l}$ represent the winning (chosen) and losing (rejected) outputs for input $x$ , respectively. DPO has gained popularity due to its simplicity and stability, bypassing the engineering complexity and challenges associated with PPO-based techniques. However, DPO is not without limitations, such as implicit biases toward longer responses and performance degradation over extended training periods (Ethayarajh et al., 2024; Meng et al., 2024). Subsequent advancements, including KTO (Ethayarajh et al., 2024), iPO (Azar et al., 2024), SimPO (Meng et al., 2024), ORPO (Hong et al., 2024), Step-DPO (Lai et al., 2024), and combination methods (Saeidi et al., 2024), have addressed many of these shortcomings.", + "bbox": [ + 109, + 366, + 883, + 470 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "While the above learning algorithms are formulated for single turn input-to-output tasks, it is also generalizable to multi-turn conversations as well as function-calling agentic workflows. 
In such scenarios, the next state $s_{t+1}$ may not always be a concatenation of all previous states $s_{\\leq t}$ and actions $a_{\\leq t}$ , but it also depends on incoming response $h_t$ from an outside environment, which can come from a follow-up user instruction or the returned result from a function call. In other words, one may define $s_{t+1} := [s_t; a_t; h_t]$ .", + "bbox": [ + 109, + 479, + 883, + 556 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "4.2 Learning of Verifiers and Reward Models", + "text_level": 1, + "bbox": [ + 109, + 571, + 464, + 585 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Verifiers play an important role in reasoning systems, improving performance both through training time credit assignment (Ouyang et al., 2022; Ziegler et al., 2019; Stiennon et al., 2020) and inference-time scaling verification (Snell et al., 2024). Reward modeling in the reasoning settings focuses on verifying the correctness of the reasoning chain, rather than evaluating using more general criteria, like helpfulness or safety (Ouyang et al., 2022). As a result, reward model training in reasoning is typically formulated as a binary classification problem between correct and incorrect reasoning steps. Based on label granularity, reward modeling is further categorized into outcome reward modeling (Section 4.2.1) and process reward modeling (Section 4.2.2). More recently, generative models for verification (Section 4.2.3) have emerged as a popular approach that produces actionable and explainable natural language feedback alongside rewards. 
In this section, we cover common training approaches for verifiers; In Section 6.1.3, we posit that verification itself may benefit from being studied as a reasoning problem itself, highlighting both concrete methods and recent analysis of failure modes in reasoning settings.", + "bbox": [ + 109, + 598, + 883, + 779 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "4.2.1 Outcome Reward Models (ORM)", + "text_level": 1, + "bbox": [ + 109, + 792, + 421, + 809 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The goal of outcome reward models (ORMs) for reasoning is to provide a scalar reward for a full trajectory. Given a dataset $\\mathcal{D}$ of input prompt $x$ and sampled outputs $y$ with corresponding correctness label $c\\in \\{0,1\\}$ , the goal of outcome reward modeling is to train the outcome reward model $r_{\\theta}$ using the loss", + "bbox": [ + 109, + 819, + 883, + 864 + ], + "page_idx": 22 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {o r m}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ c \\log \\sigma (r _ {\\theta} (x, y)) + (1 - c) \\log (1 - \\sigma (r _ {\\theta} (x, y))) ], \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 871, + 880, + 888 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "where $\\sigma$ is the sigmoid function. Alternatively, one can train ORMs with a pairwise formulation. Here, the correctness labels are not explicitly encoded in the loss function, but are used to categorize multiple sampled", + "bbox": [ + 109, + 893, + 883, + 925 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "outputs as correct or incorrect. From there, we can form pairs of outputs $\\{y_w, y_l\\}$ , where $y_w$ reaches the correct outcome (e.g., correct answer for a math problem) and $y_l$ reaches an incorrect outcome. 
The reward model $r_\\theta$ is then typically trained with the Bradley-Terry loss, similar to that in DPO training (Equation 9).", + "bbox": [ + 109, + 103, + 883, + 148 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nL _ {\\text {o r m}} (\\theta) = - \\mathbb {E} _ {x, y _ {w}, y _ {l} \\sim D} \\left[ \\log \\left(\\sigma \\left(r _ {\\theta} (x, y _ {w}) - r _ {\\theta} (x, y _ {l})\\right)\\right) \\right], \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 162, + 883, + 188 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Many other pairwise loss functions can be employed, such as hinge loss or other margin-based losses, focal loss, or variations of the Bradley-Terry loss. However, recent work (Liu et al., 2024a) has categorized the impact of loss functions, finding that the typical Bradley-Terry loss yields the best-performing ORM.", + "bbox": [ + 109, + 198, + 883, + 244 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "4.2.2 Process Reward Models (PRM)", + "text_level": 1, + "bbox": [ + 112, + 258, + 410, + 276 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "While outcome reward models are relatively simple to train, outcome-driven verification may encourage incorrect reasoning chains that lead to the correct outcome. As such, recent work has sought to train process reward models (PRMs) to assess correctness for each step in the solution. This requires more fine-grained labels than ORM training. Specifically, assume that for an output $y = (a_{1},\\dots ,a_{T})$ , we obtain process-level supervision of the form $c_{1},\\ldots ,c_{T}$ , where $c_{t}$ is a binary indicator of step $a_{t}$ correctness. 
Then, the step-wise cross-entropy loss below is applied.", + "bbox": [ + 109, + 286, + 883, + 377 + ], + "page_idx": 23 + }, + { + "type": "equation", + "text": "\n$$\nL _ {p r m} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ - \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(c _ {t} \\log \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right) + \\left(1 - c _ {t}\\right) \\log \\sigma \\left(1 - \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right)\\right) \\right] \\right. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 390, + 883, + 431 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Above, $y_{\\leq t}$ denotes the output prefix up to and including step $t$ . In practice, collecting step-level annotations $c_t$ can be extremely expensive. As a result, recent work has used variants of Monte Carlo Tree Search to automatically obtain said annotations. Specifically, the annotation for a reasoning step is obtained by rolling out the response until completion from the intermediate step, then using the outcome accuracy as a proxy for correctness (Wang et al., 2024g; Jiao et al., 2024a; Wang et al., 2024k; Dou et al., 2024a; Luo et al., 2024b; Setlur et al., 2024b). As a concrete example, suppose we roll out five completions randomly from the same prefix $y_{\\leq t}$ , with three rollouts arriving at the correct answer. Then, the confidence that the prefix $y_{\\leq t}$ is correct can be approximated as 0.6. These coarse signals can then be used to train a PRM. These two general approaches to constructing PRM training data have associated pros and cons: Collecting human annotations is expensive, but does not overfit PRM training to one particular policy. 
MCTS-based approaches yield annotations relatively quickly, but do not generalize beyond the policy from which samples are collected (Zheng et al., 2024; Setlur et al., 2024a).", + "bbox": [ + 109, + 441, + 883, + 625 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "4.2.3 Generative Verifiers", + "text_level": 1, + "bbox": [ + 112, + 638, + 320, + 654 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "ORMs and PRMs are discriminative verifiers, and are therefore unable to generate natural language to support their scores. However, natural language reasoning for evaluations is valuable both as actionable feedback and as an explainable mechanism. As a result, generative verifiers have been proposed to assess responses and provide natural language feedback. Generative verifiers have progressed from prompting frontier LLMs to evaluation-specific finetuning, relying on many of the same learning algorithms presented in Section 4.1. As such, the focus of this section is largely on training data curation.", + "bbox": [ + 109, + 666, + 883, + 756 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Finetuned generative verifiers Generative verifiers are broadly classified as critique models or LLM-as-judge models. Critique models typically take as input a question and model response, and produce a critique with actionable feedback in natural language. The foundation of critique model training is critique training data. To construct training data, intentionally incorrect outputs are sampled from a policy model. Then, these outputs are corrected, usually with stronger model or human annotations. Using such samples, past methods (Wang et al., 2023c; Xi et al., 2024) have employed SFT (Section 4.1.1) to train critique models to imitate critiques. Other methods (Yao et al., 2023c; McAleese et al., 2024) have used used the typical RLHF workflow (Section 4.1.3), first training a reward model to use during PPO training. 
More recently, outcome-based RL (e.g., GRPO, as presented in Section 4.1.2) has been used for training, relying on either hand-crafted rewards (Akyurek et al., 2023) or execution feedback for code critique (Xie et al., 2025).", + "bbox": [ + 109, + 773, + 883, + 925 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "LLM-as-judge models are a more general class of generative verifiers trained to evaluate model responses based on different protocols (pairwise evaluation, 1-5 rating, binary classification). These models rely on preference datasets, either annotated by a strong model or by humans. For example, to train a pairwise LLM-as-judge, one would collect a dataset of paired model responses for a given input prompt, then ask either a human or strong LLM to pick which response is better. Then, natural language explanations are distilled from stronger models, with distilled samples being categorized as correct or incorrect if the preference matches the annotation. From here, earlier LLM-as-judges (e.g., (Li et al., 2023b; Zheng et al., 2023a)) trained with SFT (Section 4.1.1), while newer approaches (Wang et al., 2024f; Hu et al., 2024) have used DPO (Section 4.1.3).", + "bbox": [ + 109, + 103, + 883, + 241 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Discriminative-generative hybrid verifiers Because generation is a more difficult task than classification, generative verifiers have often lagged discriminative reward models in benchmark performance. Recent work (Zhang et al., 2024f; Mahan et al., 2024) has sought to unify the two under the Generative Reward Model umbrella. Here, models use similar datasets to those used to train LLM-as-judge models, but augment the SFT loss with an answer-token loss. 
Concretely, given a dataset $\\mathcal{D}$ with samples comprised of an input $x$ , model response $y$ , and outcome label $c$ (e.g., \"Yes\"/\"No\" for correctness), the loss", + "bbox": [ + 109, + 255, + 883, + 347 + ], + "page_idx": 24 + }, + { + "type": "equation", + "text": "\n$$\nL _ {G e n R M} (\\theta) = - \\mathbb {E} _ {x, y, c \\sim \\mathcal {D}} \\left[ \\log \\left(\\pi_ {\\theta} (c | x, y) \\right] \\right. \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 359, + 883, + 376 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "is added to the typical language generation losses (e.g., SFT or DPO loss) that are used to train the model to produce natural language explanations. Here, $\\pi_{\\theta}$ is the generative reward model being trained.", + "bbox": [ + 109, + 387, + 883, + 417 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5 Learning to Reason", + "text_level": 1, + "bbox": [ + 112, + 438, + 331, + 455 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In Section 3, we explored various methods for enhancing reasoning through inference-time computation. While these approaches have proven effective in many scenarios, they come with notable limitations, such as constrained improvements in reasoning capabilities (since model parameters remain unchanged) and the requirement for substantial computational resources during inference. With the advent of OpenAI o1 (OpenAI et al., 2024), there has been a growing emphasis on improving reasoning through training-time methods. Recently, Deepseek-R1 (DeepSeek-AI et al., 2025) demonstrated that training-time approaches can achieve reasoning improvements comparable to, or even surpassing, those of inference-scaling methods. 
Reflecting this trend, this section delves deeper into the role of training in advancing reasoning capabilities.", + "bbox": [ + 109, + 470, + 883, + 592 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Specifically, we explore the data recipe, which focuses on constructing data (reasoning trajectories) tailored for reasoning tasks to facilitate training. At a high level, trajectory collection can be viewed as a form of simulation, where the generator produces reasoning steps—potentially incorporating calls and outputs from external tools—in response to either synthetic or real-world inputs. The primary challenge lies in ensuring that this simulation is both realistic and diverse while simultaneously providing meaningful supervision (reward) throughout the process. Depending on the architecture, as outlined in Section 2.3, this typically involves designing inputs (such as perception in single-agent systems or interaction in multi-agent systems) and outputs (such as actions in single-agent systems or coordination in multi-agent systems).", + "bbox": [ + 109, + 599, + 883, + 720 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Furthermore, we explore the model recipe. Depending on the learning algorithms (Section 4), the model recipe can be 'offline' (non-RL, e.g., SFT and offline RL, e.g. DPO), which focuses on extracting supervision (reward) from the collected trajectories and leveraging them for training. It can also be 'online' (most of RL algorithms, e.g., GRPO and PPO), where there is no need to collect trajectories beforehand, but learning occurs directly on the questions and their rewards. Similar to Section 3, we start with standalone LLMs, detailing how each of their components is trained (Section 5.1). 
Building on this foundation, we expand the discussion to single-agent systems (Section 5.2) and multi-agent systems (Section 5.3).", + "bbox": [ + 109, + 728, + 883, + 834 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "5.1 Learning to Reason with Standalone LLM", + "text_level": 1, + "bbox": [ + 109, + 851, + 470, + 867 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "This section examines how standalone LLMs can be trained for reasoning tasks. For 'offline' methods, the process typically involves collecting reasoning trajectories, that lead to both correct and incorrect outcomes, followed by further training the LLM on these trajectories. In contrast, for 'online' methods, learning occurs", + "bbox": [ + 109, + 878, + 883, + 925 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/f29e00273e605da0f7119a68c2e6571b38f75b90addfb3732aebec1cebca67eb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsQuestion AugmentationExpand knowledge depth and breadth of seed questionsLuo et al. (2023b); Yu et al. (2024c)
Graph-based SynthesisSynthesize prompts guided by structured taxonomyLi et al. (2024a); Tang et al. (2024)
Collecting TrajectoriesRejection SamplingFilter low-quality trajectories from current policyDong et al. (2023)
Special Reasoning PatternImitate human-like reasoning behaviorYuan et al. (2024a); Qin et al. (2024)
Reasoning DistillationDistill reasoning capability from frontier reasoning modelHuang et al. (2024d)
Training from TrajectoriesImitation LearningLearn the behavior directly from the collected trajectoriesYu et al. (2024c)
Preference LearningOptimize preference between pos. and neg. trajectoriesJiao et al. (2024a)
Latent ReasoningCompress trajectory length using implicit reasoning tokensHao et al. (2024b)
", + "bbox": [ + 117, + 101, + 879, + 210 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 6: Summary of learning to reason with standalone LLM.", + "bbox": [ + 269, + 220, + 723, + 234 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "directly based on the sampled reasoning chains and their corresponding rewards. While much of the research focus has been on sampling high-quality outputs (i.e., trajectories), methods for generating a robust and diverse set of problems, or model inputs, have also garnered attention. We begin by detailing the process of collecting trajectories, which includes constructing inputs (Section 5.1.1) and obtaining outputs (Section 5.1.2). Subsequently, we describe how the LLM can be trained using the collected trajectories (Section 5.1.3).", + "bbox": [ + 109, + 266, + 883, + 342 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "5.1.1 Constructing High-quality Prompts for Reasoning", + "text_level": 1, + "bbox": [ + 109, + 359, + 542, + 378 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To effectively drive knowledge distillation and model-seeking, we must curate a diverse collection of high-quality prompts that comprehensively span the target knowledge space. Relying on a narrow or homogeneous prompt set—even when sourced from a strong base model—limits exploration and undermines both distillation and reinforcement learning processes. By contrast, carefully crafted prompts expand the model's exploratory capacity, yielding richer representations and more robust downstream performance. As such, this section covers methods for collecting or synthesizing more challenging prompts.", + "bbox": [ + 109, + 387, + 883, + 479 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Question augmentation A straightforward approach to generating additional inputs is to directly augment existing datasets using frontier LLMs. For example, Xu et al. 
(2024a) propose using LLMs to \"evolve\" existing prompt sets, expanding their depth (e.g., more complex instructions) and breadth (e.g., rarer concepts). Yu et al. (2024c) have proposed two main approaches to augment existing questions. One is simply rewriting using frontier LLMs, and the other one is self-verification, which transforms an condition in the question into unknown variable, shows the original answer, and proposes a new question by querying the value of the unknown variable. Luo et al. (2023b) adopt a comparable strategy, employing a question generator to iteratively produce both harder and easier versions of a given question, as inspired by the instruction evolution approach of Xu et al. (2024a). The synthesized instructions are further refined using a reward model to ensure quality.", + "bbox": [ + 109, + 497, + 883, + 650 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Knowledge graph-based synthesis Directly augmenting prompts with LLMs can increase the size of the training set but does not inherently enhance diversity. To address this, knowledge graphs—structured taxonomies for organizing reasoning domains—have been utilized to construct input prompts with broader coverage. For instance, Li et al. (2024a) employ a frontier LLM to generate a knowledge graph directly, while Tang et al. (2024) task a frontier LLM with extracting a taxonomy from a seed dataset. These knowledge graphs are then used to progressively synthesize challenging questions, which are subsequently used to prompt larger teacher LLMs, resulting in high-quality instruction-tuning datasets with wider knowledge coverage. Additionally, Jiao et al. 
(2024b) leverage relation graphs derived from web documents to synthesize pretraining data, improving relation-based logical reasoning capabilities.", + "bbox": [ + 109, + 667, + 883, + 805 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "5.1.2 Collecting High-quality Reasoning Trajectories", + "text_level": 1, + "bbox": [ + 109, + 821, + 517, + 838 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Beyond constructing high-quality prompts, researchers also refine outputs to collect better trajectories for training. These techniques often sample outputs that follow specific reasoning patterns, such as lengthy reasoning processes with self-reflection, and retain those that meet higher quality standards based on ground-truth labels. Consistent with our architecture definitions in Sec. 2.3, we treat the learned verifier as part of the environment in the agentic system. Consequently, this section focuses exclusively on methods that", + "bbox": [ + 109, + 849, + 883, + 925 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "utilize existing ground-truth labels—such as answer labels in maths or test cases for code generation—while deferring discussion of methodologies that rely on learned verifiers (reward models or LLM-judges) to Sec. 5.2.", + "bbox": [ + 111, + 103, + 883, + 133 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Rejection sampling Rejection sampling (Dong et al., 2023) aims to select higher-quality samples by repeatedly sampling from the policy model (reasoner). Quality is determined through two primary sources: (1) a learned verifier, which we discuss in Section 5.2, and (2) direct comparison with ground-truth labels (when available), where samples inconsistent with the ground-truth labels are discarded. Yuan et al. 
(2023) apply this idea to mathematical reasoning, introducing edit distance to ensure diversity among trajectories. Zelikman et al. (2022) propose STaR to incorporate the correct answer into the instruction, prompting LLMs to iteratively refine incorrect reasoning traces and generate higher-quality trajectories. Tong et al. (2024) employ an up-sampling strategy to increase the proportion of successful trajectories for more challenging questions. This approach has become a standard technique for iterative model self-improvement, as demonstrated in works such as (Jiao et al., 2025; Guan et al., 2025; Dou et al., 2024b).", + "bbox": [ + 111, + 148, + 883, + 301 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Encourage special reasoning pattern Another line of research focuses on leveraging human-like reasoning behaviors—such as self-reflection, deep reasoning, and thinking-before-action—to improve reasoning accuracy and reduce hallucinations. One notable approach is Reasoning-as-Planning (RAP) (Hao et al., 2023), which divides reasoning into three steps: thinking, taking action, and observing (inferring) changes in the environment. When applied to text-based reasoning problems, LLMs simulate environment states after taking actions, leading to more accurate reasoning. Building on this idea, Yuan et al. (2024a) and Chen et al. (2023a) use frontier LLMs like GPT-3.5 and GPT-4 to synthesize trajectories with this pattern for reasoning problems, facilitating imitation learning.", + "bbox": [ + 109, + 316, + 883, + 436 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Besides, inspired by the success of long and deep reasoning revealed by OpenAI's o1 model, which incorporate self-reflection and search, some researchers propose imitating this process through rule-based synthesis. For instance, Qin et al. 
(2024) flatten MCTS trajectories, including failed branches, and ask general models to generate bridge sentences for natural transition from the failed nodes to the ones along the successful paths.", + "bbox": [ + 111, + 444, + 883, + 505 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Reasoning distillation Several studies distill reasoning patterns from models capable of producing good reasoning chains (e.g., OpenAI o1) to replicate similar behaviors in smaller models. For example, Huang et al. (2024d), NovaSky Team (2025), Bespoke Labs (2025) and Muennighoff et al. (2025) distill reasoning chains from models like OpenAI-o1, Qwen-QWQ-32B, DeepSeek-R1, and Gemini Thinking Experimental, respectively. Min et al. (2024) diversify this approach by distilling from multiple reasoning models and aggregating outputs into a unified format.", + "bbox": [ + 111, + 520, + 883, + 611 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "5.1.3 Training from Trajectories", + "text_level": 1, + "bbox": [ + 112, + 626, + 366, + 642 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Using the collected trajectories, training can be conducted by designing the input and output formats for the algorithms discussed in Section 4.", + "bbox": [ + 111, + 651, + 882, + 681 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Supervised Fine-Tuning (SFT) As discussed in Sec. 4.1.1, the most straightforward approach to training reasoning-capable LLMs is to fine-tune a model using SFT on collected trajectories. Methods such as (NovaSky Team, 2025; Bespoke Labs, 2025; Huang et al., 2024d) and (Min et al., 2024) utilize SFT with a modest number of data samples (4K-20K) to replicate the reasoning capabilities of OpenAI's o1 model. Recent SFT approaches have shifted focus to data scaling, with Xu et al. (2025e) exploring the impact of increasing data quantity up to 1 million CoT samples. 
Their findings demonstrate that performance improves with data scale, albeit with diminishing returns. In contrast, Muenighoff et al. (2025) adopt a sample-efficient approach, curating a high-quality 1K-sample reasoning dataset for fine-tuning. They show that this smaller dataset, combined with strategic inference-time prompting, achieves performance comparable to models trained on larger datasets. Similar strategies have been applied in domain-specific reasoning models, such as earlier math reasoning systems Yu et al. (2023a); Yue et al. (2023).", + "bbox": [ + 111, + 696, + 883, + 864 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Preference learning and reinforcement learning While SFT approaches have shown effectiveness, other studies demonstrate that preference learning further enhances performance. Min et al. (2024) study DPO, while Xu et al. (2025e) explore various post-training preference learning methods. Hui et al. (2024),", + "bbox": [ + 111, + 878, + 883, + 925 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Min et al. (2024), and Jiao et al. (2024a) all employ DPO with preference pairs derived from code test cases, outcome correctness, and a PRM trained on automatic supervision, respectively. Another line of work focuses on step-level DPO to optimize reasoning action selection. Specifically, Zhang et al. (2024h) use Tree-of-Thought (Yao et al., 2023a) to estimate outcome rewards and backpropagate them to intermediate nodes for quality assessment. Step-level DPO is then applied to pairs sharing the same trajectory prefix but with contrasting next actions. Lai et al. (2024) directly use GPT-4o to identify the earliest incorrect reasoning step and construct contrastive step-level DPO pairs for preference learning. Yuan et al. 
(2024d) adopt an iterative DPO approach in a self-rewarding setting, where the policy model itself acts as an LLM-as-judge to progressively improve its capabilities.", + "bbox": [ + 109, + 103, + 883, + 241 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In addition to preference learning, RL with verifiable answer labels also demonstrate importance in improving reasoning, where rule-based rewards by checking the correctness of sampled solutions are employed rather than reward models.6 Lambert et al. (2024) use both math reasoning and instruction following data for outcome-based reinforcement learning without reward models. Deepseek-R1 (DeepSeek-AI et al., 2025) further reveal the potential of pure reinforcement learning with verifiable answers. Yu et al. (2025) provide valuable reproduction of Deepseek-R1 on Qwen2.5-32B, including open-sourced data, code, and technical details about loss function design, reward shaping, and dynamic sampling.", + "bbox": [ + 109, + 247, + 883, + 353 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Training with latent reasoning Typical reasoning models generate long reasoning chains and have demonstrated strong empirical performance. However, this comes at the cost of increased inference time, as they produce lengthy natural language reasoning traces. These traces often contain many tokens that improve the flow and coherence of the output, with only a small fraction directly contributing to the reasoning process. To address this inefficiency, an alternative approach, known as latent reasoning, focuses on representing reasoning trajectories implicitly. 
This is achieved either by omitting intermediate reasoning tokens entirely or by compressing them into specialized reasoning tokens or continuous vector representations.", + "bbox": [ + 109, + 367, + 883, + 474 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Earlier work in continuous reasoning focused on compressing natural language reasoning chains into a smaller number of tokens. Deng et al. (2023b) employ knowledge distillation to encode the knowledge from natural language reasoning tokens into intermediate representations of the student model. During inference, the model generates only the final answer without producing additional rationale. This approach is further refined through curriculum learning (Deng et al., 2024b), which gradually removes reasoning tokens during training to reduce distribution mismatch.", + "bbox": [ + 109, + 479, + 883, + 571 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "However, removing all explicit intermediate reasoning tokens may compromise the model's expressivity (i.e., ability to articulate complex reasoning) (Prystawski et al., 2023). A natural trade-off is to retain a limited number of reasoning tokens, making them implicit to enhance expressiveness while preserving performance. Goyal et al. (2024) introduce learnable tokens during pre-training and fine-tuning within standard CoT trajectories, enabling the model to perform additional computation before generating an output token. Wang et al. (2023d) explore various techniques for compressing reasoning steps from training trajectories into a fixed set of planning tokens. At the start of each reasoning step, the model generates a planning token, whose encoded \"knowledge\" guides the generation of more coherent outputs. Hao et al. (2024b) propose using the last-layer hidden states before the language modeling head as implicit reasoning token representations, feeding these back into the model to generate the next token auto-regressively. 
These implicit representations are optimized in a stage-wise manner, akin to the approach of Deng et al. (2024b). Xu et al. (2025f) propose an approach for continuous-space reasoning that does not require modifying the LLM reasoner. Specifically, they employ a lightweight fixed assistant model to generate instance-specific soft thought tokens speculatively as the initial chain of thoughts, which are then mapped into the LLM's representation space via a trainable projection module.", + "bbox": [ + 109, + 579, + 880, + 806 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "5.2 Learning to Reason with Single-agent Systems", + "text_level": 1, + "bbox": [ + 109, + 821, + 504, + 838 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "As discussed in Section 2.3, agentic systems enhance the reasoning capabilities of standalone LLMs by incorporating agent-environment interactions. These interactions enable the agent to perceive its environment", + "bbox": [ + 109, + 849, + 883, + 880 + ], + "page_idx": 27 + }, + { + "type": "page_footnote", + "text": "6We treat the work using reward model/tool-based verifier for RL in the scope of single-agent systems (see Sec. 5.2) 7As discussed in Section 4.2, in outcome-based RL, the reward is assigned to the entire trajectory. This contrasts with process-based RL, which assigns a reward at each step.", + "bbox": [ + 109, + 886, + 883, + 924 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 27 + }, + { + "type": "table", + "img_path": "images/6468e2dd5f73b620b8760b6d78b4044d48515269db4a51d44ab8543841c582c9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
Action-Environment InteractionsIncorporating FeedbackUse environment feedback to filter trajectoriesNi et al. (2024); Xin et al. (2024b)
Training External ModelsTrain models (e.g., to critic) from the interactionWu et al. (2024c)
Search with VerifiersUse verifiers to identify better reasoning trajectoriesWan et al. (2024c)
Distillation from TeacherDistill capability from frontier reasoning modelGou et al. (2024); Ma et al. (2024a)
Training from TrajectoriesSupervised Fine-TuningCollected offline trajectories + learn via SFTDou et al. (2024b); Yin et al. (2024)
Reinforcement LearningLearning directly on questions and their rewardsShao et al. (2024)
Learning with RefinerTrain refiner model to iteratively improve the last-round solution.Xiong et al. (2025)
", + "bbox": [ + 117, + 101, + 879, + 193 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Table 7: Summary of learning to reason with single-agent systems.", + "bbox": [ + 256, + 203, + 736, + 218 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "and accordingly perform actions. This section explores how simulation is achieved through the design of such perceptions and agent actions. It then covers training methods—how agents are trained using these trajectories. Additionally, we discuss how predefined patterns are leveraged when collecting trajectories.", + "bbox": [ + 109, + 247, + 883, + 292 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "5.2.1 Trajectory Collection through Agent-Environment Interactions", + "text_level": 1, + "bbox": [ + 109, + 309, + 635, + 325 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "By interacting with the external world in different ways, agents can effectively construct trajectories that help refine their reasoning process. These interactions to enrich reasoning take the form of (a) incorporating execution feedback, (b) training external models to help reasoning, (c) search with verifiers, and (d) trajectory distillation from stronger teacher agents.", + "bbox": [ + 109, + 335, + 883, + 397 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Incorporating execution feedback Through active interaction with the environment, the agent can obtain valuable feedback for trajectory filtering. Building on STaR (Zelikman et al., 2022) (discussed in Sec. 5.1.2), NExT (Ni et al., 2024) leverages unit tests (Ye et al., 2022) to obtain self-generated rationales that lead to correct solutions for training. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) solve formal theorem-proving problems by generating potential solutions and validating them through interaction with the Lean proof assistant (De Moura et al., 2015), either proving or disproving the solutions. 
Xin et al. (2024b) further improve DeepSeek-Prover by introducing RMaxTS, an exploration strategy driven by intrinsic rewards to generate diverse proof paths. Furthermore, the agent can integrate environmental information directly into the training process to improve its reasoning capabilities. For example, Cummins et al. (2023) train a 7B model from scratch, achieving significantly improved code optimization performance by leveraging optimizing transformations from external LLVM compilers.", + "bbox": [ + 109, + 412, + 883, + 580 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Training external models The agent can leverage its interaction with the environment to train external models that can in turn help the agent's reasoning. For example, Wu et al. (2024c) train a critic model to identify relatively easier problems for the policy to explore and guide the policy in searching for deeper proof paths. Re-ReST (Dou et al., 2024b) proposes training a refiner to correct the agent's wrong output based on environmental feedback.", + "bbox": [ + 109, + 597, + 883, + 674 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Reasoning search with verifiers Search-based methods address sampling challenges for more difficult problems by leveraging external reward models or generation probabilities to guide decoding. For example, Wan et al. (2024c) develop a Monte Carlo Tree Search (MCTS)-based approach to identify better reasoning trajectories. Each tree node represents either a sentence or token, and a learned LLM-based value function and outcome reward model are used to estimate expected returns during the search process. This method can be applied for both inference-time path selection and training-time imitation learning.", + "bbox": [ + 109, + 690, + 883, + 781 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Guan et al. 
(2025) rely solely on outcome labels to iteratively update the policy model and a process preference model (PPM) through MCTS. The PPM approximates the Q-value of intermediate reasoning steps. Lai et al. (2024) use an LLM-as-judge to identify the first reasoning step in a sampled trajectory that contains an error. The trajectory up to the error is then used to sample new outputs, and DPO preference pairs are formed from correct and incorrect outputs. Zhang et al. (2024h) focus on unsupervised settings where answer labels are unavailable. Discarded steps collected during the search process are treated as negative actions, contrasting with the steps retained in the final path for DPO training. For multi-step reasoning in dynamic environments, such as web navigation, Putta et al. (2024) propose combining guided MCTS with self-critique to facilitate more effective exploration.", + "bbox": [ + 109, + 787, + 883, + 925 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Trajectory distillation from stronger teacher agents To tackle challenging mathematical problems, Gou et al. (2024) curate interactive tool-use (e.g., code execution) trajectories using GPT-4, derived from existing mathematical datasets across various domains. Similarly, MuMath-Code (Yin et al., 2024) employs multi-perspective data augmentation to generate diverse math questions and synthesizes code-nested solutions using GPT-4. Beyond mathematics, other domains have also been explored. For instance, Ma et al. (2024a) construct a tool-augmented training set for scientific reasoning by prompting GPT-4. CoGEX (Weir et al., 2024) extends LLMs' program synthesis capabilities to tasks that are not easily expressible as code, such as commonsense reasoning and sarcasm understanding. 
To collect training trajectories, GPT-4 is used to transform the Alpaca dataset (Taori et al., 2023) into the required format. Ke et al. (2025b) explore collecting trajectories from a more capable generative reward model (GPT-4o) to train a finance-expert model by identifying and correcting the first erroneous step in the reasoning process. Additionally, AgentBank (Song et al., 2024) introduces the largest dataset of agent-environment interaction trajectories, comprising 16 tasks across 5 distinct agent skill dimensions. This dataset is created by annotating actions and their corresponding rationales using LLMs of varying scales, addressing key challenges in trajectory collection, such as scalability.", + "bbox": [ + 109, + 103, + 883, + 330 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In addition to leveraging trajectories from GPT-4, Gou et al. (2024) introduce output space shaping by incorporating samples generated by the agent itself. Specifically, they train the agent on both self-sampled correct trajectories and those corrected by a teacher model, promoting diversity in plausible reasoning steps.", + "bbox": [ + 109, + 337, + 883, + 383 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "5.2.2 Agent Training from Trajectories", + "text_level": 1, + "bbox": [ + 112, + 398, + 418, + 414 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Supervised Fine-Tuning (SFT) After collecting trajectories, many methods apply supervised fine-tuning (SFT) to train the agent, enabling models with little prior experience in agentic environments to adapt quickly. Dou et al. (2024b) enhances agent reasoning by incorporating refiner-corrected samples into the self-training process. NExT (Ni et al., 2024) uses filtered trajectories to train agents for program repair tasks, while Weir et al. (2024) fine-tune agents on collected trajectories to enable the generation and emulation of pseudo-programs. 
AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) iteratively train and refine the policy model using verified proofs, improving performance in theorem proving tasks. Similarly, Gou et al. (2024), Yin et al. (2024), Ma et al. (2024a), and Song et al. (2024) fine-tune agents on agent-environment interaction trajectories generated by proprietary LLMs, enhancing reasoning capabilities across diverse domains. Notably, MuMath-Code (Yin et al., 2024) adopts a two-stage training strategy, first fine-tuning on pure CoT data and then on code-nested data. Chen et al. (2024e) introduce Agent-FLAN, a fine-tuning method designed to improve LLMs' agent capabilities while addressing challenges such as distribution shifts and hallucinations in training data. By redesigning the training corpus and incorporating negative samples, Agent-FLAN enhances both agent-specific and general capabilities of LLMs.", + "bbox": [ + 109, + 425, + 883, + 650 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Reinforcement Learning (RL) Beyond imitation learning through SFT, recent approaches have leveraged reinforcement learning to further enhance reasoning capabilities. Notably, GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025), which employs verifiable outcome rewards during online RL training, has demonstrated strong empirical performance. Havrilla et al. (2024) investigate multiple RL algorithms (e.g., Expert Iteration, PPO) for math reasoning tasks, finding that incorporating outcome reward models has negligible effects on performance for both Expert Iteration and PPO. Similarly, Shao et al. (2024) observe relatively minor performance gains when using PRMs during GRPO training. Yang et al. (2024b) explore using a PRM to \"shape\" outcome rewards by using a linear combination of outcome and PRM rewards for GRPO training. In contrast, Wang et al. (2024g); Luo et al. (2023a); Jiao et al. 
(2024a) demonstrate that using a trained PRM during PPO training leads to significant performance improvements. Similar gains are observed in the code generation domain (Dai et al., 2024), where the PRM serves both as a reward signal and as an initial checkpoint for the value function during PPO. Zhang et al. (2024a) iteratively train both a PRM and LLM, while Setlur et al. (2024b) provide a new perspective by comparing Q-value-based PRMs with advantage function-based ones, showing improved learning efficiency and performance in guided reinforcement learning. Concurrently, Gao et al. (2024a) address reward hacking (Casper et al., 2023)—where the policy model generates numerous correct but irrelevant reasoning steps to inflate rewards—by implementing clipping and computing relative, step-adjacent rewards.", + "bbox": [ + 109, + 667, + 880, + 924 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 29 + }, + { + "type": "table", + "img_path": "images/9a72af506464d7266485c42711ec30e3529535cc0cb4bc99428f137ea9d892be.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PerspectiveMethodCharacteristicRepresentative Work
Designing CommunicationCentralized communicationUse a centralized controller for information aggregationCanese et al. (2021); Matta et al. (2019)
Conditioned information sharingShare information based on relevancy and privacyHong et al. (2023); Qiu et al. (2024)
Coordinating ActionsLeverage knowledgeUtilize expert knowledge as constraintsLau et al. (2012)
Graph-based methodsUse graphs as structured frameworksRuan et al. (2022); Li et al. (2020)
Hierarchical approachDivide policies to strategy and executionXu et al. (2023)
Training from TrajectoriesTraining data from interactionsObtain high-quality trajectories from interactionsLi et al. (2024c); Estornell et al. (2024)
Gradient modificationModify gradients towards optimal pointsLi et al. (2024f)
", + "bbox": [ + 117, + 101, + 879, + 195 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Table 8: Summary of learning to reason for multi-agent systems.", + "bbox": [ + 264, + 205, + 730, + 220 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Qiao et al. (2023a) introduce TRICE, a two-stage framework that enables agents to determine when and how to use tools through Reinforcement Learning with Execution Feedback (RLEF) from external tools. Similarly, Xin et al. (2024b) enhance DeepSeek-Prover by incorporating reinforcement learning from proof assistant feedback (RLPAF). To effectively learn from both successful and unsuccessful agent-environment interactions, Putta et al. (2024) develop an off-policy variant of DPO for iterative training.", + "bbox": [ + 109, + 250, + 883, + 325 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Learning with refiner For more challenging questions, models may fail to generate enough successful trajectories to serve as a reliable positive training signal. However, even trajectories with incorrect outcomes can still be leveraged effectively. For example, Qu et al. (2024a) train a correction model using RL to iteratively refine generated model responses. Similarly, Tang et al. (2025) propose a self-evolving framework to train a critique model, which enhances the quality of outputs through continuous feedback.", + "bbox": [ + 109, + 342, + 883, + 419 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Refiner models can also be integrated into the search process to iteratively improve generation quality. For instance, Snell et al. (2024) train a refiner model via RL (Qu et al., 2024b) to refine outputs sequentially. The final prediction is obtained through majority voting over all predictions generated during this iterative refinement process, effectively scaling test-time computation. Xi et al. 
(2024) develop a step-level critique model that provides feedback for each reasoning step, using training instances collected from GPT-4o. This feedback serves two purposes: (1) expanding training data to improve the actor model, and (2) scaling test-time computation through iterative self-refinement in a multi-agent setup. Zhang et al. (2024b) combine reasoning and self-refinement into a single MCTS framework, where each node is either a reasoning node (generating complete reasoning trajectories) or a refining node (identifying and correcting reasoning flaws). A learned pairwise reward model compares the quality of refined and original outputs, estimating the expected returns of each node. However, this work does not explicitly account for the inference setting, where neither the reasoner nor the refiner has access to the correctness of the sampled response. This can lead to refiners inadvertently degrading originally correct solutions. To address this issue, Xiong et al. (2025) introduce a learnable self-rewarding mechanism. This approach mitigates the risk of worsening correct solutions and alleviates the distribution-shifting problem in self-correction (Kumar et al., 2024).", + "bbox": [ + 109, + 426, + 883, + 652 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "5.3 Learning to Reason with Multi-agent System", + "text_level": 1, + "bbox": [ + 109, + 671, + 495, + 686 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In Section 2.3, we discussed how multi-agent systems extend single-agent systems through agent-agent communication. This enables agents to assume distinct roles, exchange messages, and coordinate their actions before interacting with the environment. In this section, we explore how trajectory collection can be achieved through the careful design of agent-agent communication and the coordination of actions across different agents. 
As a system level, communication serves as the input or perception mechanism for participating agents, focusing on the protocols governing message exchange. Meanwhile, actions represent the output of the system, addressing how consensus is reached given the diverse actions proposed by individual agents.", + "bbox": [ + 109, + 699, + 883, + 806 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "5.3.1 Designing Agent-Agent Communication", + "text_level": 1, + "bbox": [ + 109, + 821, + 470, + 838 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In a multi-agent framework, ensuring that each agent is aware of the actions of others is critical, as a well-designed communication system can significantly enhance collective intelligence (Guo et al., 2024b). One effective solution is the use of a centralized controller (Canese et al., 2021). For example, Matta et al. (2019) propose a centralized aggregation center that constructs a global swarm matrix by aggregating the Q-value tables of all agents. Similarly, the MARCO framework (Zhang et al., 2021) employs centralized training with", + "bbox": [ + 109, + 849, + 883, + 925 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "decentralized execution to improve sample efficiency in partially observable multi-agent environments. By learning a shared model that generalizes across agents' policies and directing exploration toward uncertain areas, MARCO optimizes reasoning and resource utilization in cooperative tasks.", + "bbox": [ + 109, + 103, + 883, + 148 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To enable effective communication among agents, Sukhbaatar et al. (2016) introduce a neural communication model with a learned protocol tailored to the task. 
Additionally, a shared message pool (Hong et al., 2023) can be implemented, where agents send messages and subscribe to relevant ones based on their individual profiles. In recent work by Qiu et al. (2024), each agent maintains a private intention, which includes its current goal and associated sub-tasks. These intentions are broadcast periodically, and a propagation network converts them into teammate-specific communication messages, ensuring that relevant goals are shared with the appropriate teammates.", + "bbox": [ + 109, + 156, + 883, + 262 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "5.3.2 Coordinating Actions among Multiple Agents", + "text_level": 1, + "bbox": [ + 109, + 279, + 511, + 295 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "To enhance coordination among multiple agents, various approaches have been proposed, including leveraging expert knowledge, graph-based frameworks, and hierarchical structures to improve efficiency and effectiveness. For better coordination of actions across agents, Lau et al. (2012) utilize expert coordination knowledge as constraints to refine the exploration and learning process. By reducing the action space and focusing on promising states, this approach enhances decision-making. Additionally, graph-based methods have been explored to improve coordination. For instance, the Graph-based Coordination Strategy (GCS) (Ruan et al., 2022) introduces a framework that employs a directed acyclic graph to coordinate agent policies. This enables agents to synchronize their actions through predefined temporal sequences. Similarly, Deep Implicit Coordination Graphs (DICG) (Li et al., 2020) propose a graph neural network-based module to dynamically infer coordination structures for multi-agent reinforcement learning (MARL).", + "bbox": [ + 109, + 306, + 883, + 455 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Furthermore, hierarchical approaches have been developed to enhance synchronization. 
The Hierarchical Cooperative Multi-Agent Learning (HAVEN) framework (Xu et al., 2023) divides policies into two levels—strategy and execution—improving both inter-agent and inter-level coordination.", + "bbox": [ + 109, + 464, + 883, + 510 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "5.3.3 Multi-Agent Training from Trajectories", + "text_level": 1, + "bbox": [ + 109, + 526, + 465, + 542 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Compared to single-agent scenarios, multi-agent training introduces additional challenges in higher coordination and communication complexity and recent approaches have leveraged different ways to address the challenge. DEBATUNE (Li et al., 2024c) employs a multi-round debate mechanism between two agents with opposing stances to generate training data. Through iterative debate, arguments are refined, resulting in high-quality and diverse outputs. During the training phase, models are fine-tuned using these debate-generated trajectories, enabling controllability and alignment with user-defined stances. Similarly, Subramaniam et al. (2025) fine-tune a society of agents, starting from the same base model, on independent data generated through multi-agent interactions. These agents specialize in distinct roles, such as \"generation\" and \"critic\" producing diverse reasoning trajectories. Training on such varied trajectories fosters specialization and mitigates performance plateaus. Acc-Debate (Estornell et al., 2024) utilizes an Actor-Critic framework to train a team of two agents collaboratively. One agent serves as the \"Actor\" generating responses, while the other acts as the \"Critic\" refining those responses. Training alternates between optimizing the Actor and Critic models, leveraging partial trajectory rewards which captures the expectation of reaching the correct answer at intermediate time stepsto address temporal dependencies in the debate process. 
This approach enhances collaboration and improves final performance.", + "bbox": [ + 109, + 553, + 883, + 779 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Furthermore, Li et al. (2024f) address the challenge of mixed-motive cooperation in multi-agent systems by modifying gradients to guide agents toward stable fixed points that balance individual and collective interests. This method enhances the ability to optimize trajectories for effective collaboration.", + "bbox": [ + 109, + 787, + 883, + 833 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "5.4 Toward Cost-aware and Inference-aware Training", + "text_level": 1, + "bbox": [ + 109, + 851, + 522, + 867 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "As reasoning models grow increasingly complex, ensuring both efficiency and effectiveness becomes crucial. Inference-time scaling and learning-to-reason approaches play complementary roles, as most inference-time scaling methods can be applied to models specifically trained for reasoning. However, both approaches come", + "bbox": [ + 109, + 878, + 883, + 925 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "with associated costs, whether it involves generating thousands of additional tokens compared to greedy decoding during inference or training models on large-scale trajectory datasets. Consequently, cost-aware methodologies, which factor in computational costs when deciding how to allocate resources during both training and inference, or those that address sample inefficiency, have gained recent attention. Similarly, inference-aware methodologies aim to enhance the time and cost efficiency of inference scaling by explicitly incorporating inference-time scaling strategies during training. 
In this section, we explore emerging cost-aware and inference-aware approaches.", + "bbox": [ + 109, + 103, + 883, + 210 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "5.4.1 Cost-aware Training", + "text_level": 1, + "bbox": [ + 112, + 227, + 323, + 242 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Learning to reduce inference cost This line of research explores strategies to optimize the tradeoff between computational cost and reasoning performance by dynamically allocating resources based on input (prompt) complexity and desired output quality. For prompt analysis, Damani et al. (2025) use a learnable model to predict the difficulty of batched queries and dynamically allocate inference budgets accordingly. Building on this, Zhang et al. (2024d) train a model to predict the most efficient combination of inference strategies, directly optimizing for pass rates. Yue et al. (2025) decompose reasoning trajectories into specific behaviors and employ a trainable planner to derive question-specific compositions, identifying the optimal reasoning strategy—such as whether question decomposition or rewriting is necessary, whether Python programs are required, or if answer verification is needed. On the output side, Snell et al. (2025) propose a look-ahead search method, similar to step-level beam search, which switches between branches based on estimated returns to minimize search costs.", + "bbox": [ + 109, + 253, + 883, + 420 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Data-efficient training Another research direction focuses on reducing training costs by using a small set of high-quality samples (questions paired with trajectories or labels). Muennighoff et al. (2025) curate a dataset of 1,000 samples, emphasizing difficulty, diversity, and quality. Their work demonstrates that finetuning Qwen2.5-32B-Instruct on this dataset achieves performance surpassing o1-preview on competition math benchmarks. Ye et al. 
(2025) fine-tune Qwen2.5-32B-Instruct on 817 carefully curated training samples, achieving superior performance across a broader set of math reasoning benchmarks. Notably, Ye et al. (2025) highlight that these performance gains depend on using strong pre-trained models like Qwen2.5-32B-Instruct and do not occur with weaker models (e.g., Qwen1.5-32B-Instruct).", + "bbox": [ + 109, + 438, + 883, + 561 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "5.4.2 Inference-aware Training", + "text_level": 1, + "bbox": [ + 112, + 577, + 356, + 593 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Existing work on inference scaling typically treats inference-time computation as a post-hoc design choice after conventional training. Inference-aware training approach challenges the assumption that decoupling training and inference-time computation is optimal. For instance, if an LLM is allowed multiple attempts to solve a math problem, fine-tuning it to explore diverse problem-solving strategies might yield better results than simply generating candidates representing its best single attempt.", + "bbox": [ + 109, + 604, + 883, + 680 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The core idea is that explicitly considering the inference procedure during training can significantly enhance the effectiveness of inference-time computation. For example, Best-of-N (BoN) is a basic inference-time strategy that selects the highest-reward response from $N$ candidates. However, this approach is misaligned with fine-tuning objectives. To address this, Sessa et al. (2024) propose an RL objective that distills the Best-of-N distribution into the policy model using Jeffreys divergence (Jeffreys, 1946). Similarly, Balashankar et al. (2024) develop a calibrated reward that incorporates the inference procedure (Best-of-N) during alignment. In a related effort, Chow et al. 
(2024) aim to optimize BoN directly, overcoming the non-differentiable argmax operator by employing a reinforcement learning framework.", + "bbox": [ + 109, + 686, + 880, + 808 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "6 Discussion: Trends and Open Challenges", + "text_level": 1, + "bbox": [ + 109, + 829, + 516, + 847 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The field of reasoning LLMs has seen rapid advancements, with notable trends emerging in training-vs-inference regimes and architectural dimensions as we discuss in Section 6.1. Despite this progress, several challenges remain, hindering their generalizability and practical applicability. This section outlines these observed trends and highlights open challenges, along with potential directions to address them (Section 6.2).", + "bbox": [ + 109, + 864, + 883, + 926 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "6.1 Observed Trends", + "text_level": 1, + "bbox": [ + 116, + 102, + 285, + 118 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Following the two dimensions outlined in Figure 2, we identify two key trends in LLM reasoning: one progresses from inference scaling to learning to reason (Section 6.1.1), while the other shifts from standalone LLMs to agentic systems (Section 6.1.2). Additionally, reasoning is ubiquitous yet challenging when developing a general-purpose reasoner. Notably, many state-of-the-art reasoning language models are predominantly focused on a few domains, particularly mathematics and coding (OpenAI et al., 2024; DeepSeek-AI et al., 2025). Whether it is possible to build a truly generalizable reasoning system remains an open question (Kang et al., 2024; Qi et al., 2024; Huang et al., 2024c; Sun et al., 2024c). 
However, we observe a growing trend toward developing domain-specific reasoning models (Section 6.1.3).", + "bbox": [ + 114, + 131, + 883, + 252 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "6.1.1 From Inference Scaling to Learning to Reason", + "text_level": 1, + "bbox": [ + 116, + 267, + 517, + 284 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Since the introduction of CoT and self-consistency (Wang et al., 2023f), inference scaling techniques have emerged as a key paradigm for enhancing reasoning performance without incurring the costs associated with reasoning-specific training. Inference scaling complements learning-to-reason approaches, with recent studies demonstrating that combining self-consistency with reasoning-specific training yields further improvements (DeepSeek-AI et al., 2025; Muennighoff et al., 2025). Additionally, since the release of OpenAI's o1 (Huang et al., 2024d), some methods have sought to activate human-like reasoning patterns by introducing self-correction (Kumar et al., 2024), self-critique (Xi et al., 2024), or even MCTS Qin et al. (2024).", + "bbox": [ + 114, + 294, + 883, + 401 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Researchers initially found that data-driven approaches, such as supervised fine-tuning (SFT) and knowledge distillation, were highly effective in enhancing LLMs' reasoning capabilities. However, these methods rely on the availability of a strong teacher model for distillation. An alternative approach uses outcome labels for iterative rejection sampling (Yuan et al., 2023), which converges quickly after a few iterations (Dong et al., 2023). These limitations have spurred the development of more data-efficient methods, such as automatic process supervision (Jiao et al., 2024a; Wang et al., 2024g;k; Luo et al., 2024b) and iterative refinement (Guan et al., 2025), which optimize training trajectories using fixed outcome labels. 
The release of Deepseek-R1 (DeepSeek-AI et al., 2025) further advanced the field, demonstrating the ability to generate human-like, long reasoning chains through pure reinforcement learning under outcome supervision alone.", + "bbox": [ + 114, + 407, + 883, + 542 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "6.1.2 From Standalone LLMs to Agentic Systems", + "text_level": 1, + "bbox": [ + 116, + 559, + 501, + 575 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "In Sections 2.3 and 5, we discussed how the rise of agentic systems has significantly influenced reasoning research. A clear trend has emerged, shifting from standalone LLM reasoning to agentic reasoning. This shift aligns with our expectations: reasoning is no longer confined to a single LLM but is expected to interact with the external world and other agents, as well as exhibit autonomy, such as planning capabilities.", + "bbox": [ + 114, + 585, + 883, + 647 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "On one hand, there is ongoing debate about whether agentic reasoning is always beneficial, especially for straightforward and simple tasks (Sprague et al., 2024b; Liu et al., 2024c). On the other hand, current systems' autonomy is largely limited to planning, whereas it could encompass much more. For instance, system-level or meta-level planning is essential in agentic systems, requiring the design of effective ways to connect different agents (Zhou et al., 2025a; Zhuge et al., 2024; Zhang et al., 2024c; Hu et al., 2025). A notable recent study (Ke et al., 2025c) demonstrates that such design can be with zero supervision and through self-improvement alone. 
Another critical aspect of autonomous agents is proactivity, yet current reasoning agents still lack the ability to proactively seek clarification or request additional information from users or the environment.", + "bbox": [ + 114, + 654, + 883, + 790 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "6.1.3 Domain-Specific Reasoners", + "text_level": 1, + "bbox": [ + 116, + 806, + 377, + 821 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Mathematical reasoning Mathematics serves as an ideal testbed for studying LLM reasoning capabilities due to its structured nature and clear evaluation criteria. Mathematical reasoning has evolved along two complementary paths. The first, often referred to as the \"informal approach\" (Yang et al., 2024d), treats mathematical problems as natural language tasks and fine-tunes LLMs on carefully curated or filtered problem-solving datasets. Systems like NuminaMath (Fleureau et al., 2024), DeepSeekMath (Shao et al., 2024), Llemma (Azerbayev et al., 2024), and MetaMath (Yu et al., 2024b) have demonstrated remarkable", + "bbox": [ + 114, + 832, + 883, + 924 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 948, + 509, + 960 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "capabilities by combining mathematical text training (pre-training, supervised fine-tuning, and reinforcement learning), tree-based search, tool-integrated reasoning, and various inference scaling techniques discussed in earlier sections. This approach has achieved significant success across benchmarks ranging from GSM8K (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021b) to competition-level problems such as AIMO (Markets, 2024) and AIME-level problems (aim, 2025). However, challenges persist in tackling college-level and advanced mathematics, where high-quality training data is scarce, and verifying complex multi-step reasoning becomes increasingly difficult. 
Spatial reasoning (e.g., counting, navigation, and inferring spatial relationships) presents another challenge for LLMs and multi-modal LLMs (Wang et al., 2024b).", + "bbox": [ + 109, + 103, + 883, + 224 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Complementing the informal approach, formal mathematical reasoning grounds systems in precise symbolic frameworks, such as proof assistants like Isabelle (Nipkow et al., 2002), Lean (De Moura et al., 2015), and Coq (Barras et al., 1997; The Coq Development Team, 2024). Recent advances in this direction include neural theorem-proving systems that combine tactic generation with proof search (Yang et al., 2023b; Thakur et al., 2024), as well as autoformalization techniques that translate between natural and formal mathematics (Wu et al., 2022; Jiang et al., 2024a). The formal approach offers several advantages: automatic verification of reasoning steps, generation of training signals from the verification environment, and the potential to bootstrap capabilities through learned abstractions. For example, AlphaProof (AlphaProof & teams, 2024) and AlphaGeometry (Trinh et al., 2024) demonstrate the power of integrating neural networks with symbolic verification, achieving groundbreaking performance on Olympic-level mathematics problems. A recent position paper by Yang et al. (2024d) argues that formal mathematical reasoning represents a critical frontier for advancing AI's ability to tackle increasingly abstract and complex mathematical problems.", + "bbox": [ + 109, + 231, + 883, + 414 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Code generation Code serves as a more formal language for reasoning. Given the complexity of generating entire programs, earlier studies primarily focused on function-level code completion, as demonstrated by benchmarks such as HumanEval (Chen et al., 2021) and MBPP (Austin et al., 2021). 
With stronger foundation models trained on extensive code corpora (Zhu et al., 2024a; Hui et al., 2024), the focus of evaluation has shifted toward general competition programming (Hendrycks et al., 2021a; Jain et al., 2024). The earliest significant attempt to solve competition-level coding problems through large-scale training was AlphaCode (Li et al., 2022). Similar to the general domain, the training paradigm has evolved from instruction tuning (Wei et al., 2024) to RL and preference learning based on test cases and compiler feedback (Dou et al., 2024a; Weyssow et al., 2024; Jiao et al., 2025; Huang et al., 2024b). The recent releases of DeepSeek-R1 (DeepSeek-AI et al., 2025) and OpenAI's o3 (OpenAI et al., 2025) have further advanced the field by enabling end-to-end RL through outcome supervision. OpenAI et al. (2025) also highlight that purely data-driven approaches can outperform models incorporating human-experience-based competition strategies.", + "bbox": [ + 109, + 431, + 883, + 612 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Another important application of code generation is in software engineering, where advancements in LLMs are making fully automated pipelines increasingly feasible. SWE-Bench (Jimenez et al., 2024), a benchmark based on GitHub issues, challenges LLMs with real-world software engineering problems. These tasks require coupled abilities, such as long-context modeling to process repository-level inputs, logical reasoning to locate bugs and design unit tests, and programming to implement solutions. Wei et al. (2025) pioneer the use of end-to-end RL for optimizing automatic debugging. 
Specifically, they select pull requests (PRs) from GitHub linked to issues and use the consistency between the predicted code snippet and the repository's code after the PR is merged as the reward signal.", + "bbox": [ + 109, + 619, + 883, + 741 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Tabular reasoning Reasoning over tabular (or structured) data, which involves generating responses based on user queries and provided tables, plays a vital role in improving data analysis efficiency (Lu et al., 2025). A critical aspect of tabular reasoning with LLMs involves transforming structured data into a format that these models can process effectively. Techniques such as serialization (Chen, 2023; Cheng et al., 2023; Chen et al., 2023e), prompt engineering (Ye et al., 2023b; Lin et al., 2023b; Wang et al., 2024n; Zhang et al., 2024j), and embedding methods (Herzig et al., 2020) have been widely studied to facilitate this adaptation, converting tabular data into human-readable text or leveraging specialized table representations. Additionally, specialized prompting of LLMs with transformed tabular data is crucial. For instance, Pourreza & Rafiei (2023); Ye et al. (2023c) find that LLMs perform better on decomposed sub-tasks than on the entire table reasoning task. However, LLMs may still struggle with certain sub-tasks. To address this, (Cao et al., 2023) employ diverse tools for specific sub-tasks, while (Lin et al., 2023b;a) focus on retrieving relevant", + "bbox": [ + 109, + 758, + 883, + 925 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "tables. Notably, (Jiang et al., 2023) propose a unified approach to enhance LLM reasoning over structured data by designing specialized interfaces. 
These interfaces extract relevant evidence from structured data, enabling LLMs to focus on reasoning based on the gathered information.", + "bbox": [ + 109, + 103, + 883, + 148 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Despite the promising results of various adaptation methods, significant challenges remain. First, tabular data often comprises diverse feature types—categorical, numerical, and textual—adding complexity to modeling (Borisov et al., 2023; Gruver et al., 2023). Second, the effectiveness (Sui et al., 2024) and robustness (Liu et al., 2024d) of LLMs in tabular tasks heavily depend on proper prompt design and data preprocessing. Poor or out-of-distribution preprocessing can lead to information loss, misinterpretation, multicollinearity, and interpretability issues, significantly degrading performance (Sui et al., 2024). Finally, LLMs are prone to hallucinations (Ye et al., 2023d) and fairness concerns (Liu et al., 2023), limiting their reliability. For a comprehensive overview, see recent surveys on LLMs for table reasoning (Fang et al., 2024b; Dong & Wang, 2024; Zhang et al., 2025a; Lu et al., 2025).", + "bbox": [ + 109, + 156, + 883, + 292 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Reasoning in multi-agent games In game-theoretic scenarios involving both collaboration and competition, strategic social reasoning skills are essential (Lee et al., 2024). Strategic reasoning refers to the cognitive process of making decisions in complex social situations. As highlighted by Feng et al. (2024b), the complexity and challenges of this reasoning stem from the involvement of multiple parties and the dynamic nature of the environment.", + "bbox": [ + 109, + 314, + 883, + 388 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "To capture the cognitive states of multiple parties, the concept of Theory-of-Mind (ToM) (Zhang et al., 2012) has been integrated into modeling processes. 
ToM attributes mental states—such as beliefs, intentions, desires, emotions, and knowledge—to oneself and others. Recent studies (Kosinski, 2024) have shown that LLMs exhibit ToM capabilities, and researchers have leveraged these capabilities to enhance strategic reasoning in social scenarios. For instance, Guo et al. (2023) computationally model the beliefs, intents, and potential behaviors of teammates and opponents to improve understanding and reasoning in games. Similarly, TOMABD (Montes et al., 2023) incorporates ToM into agents to enhance their reasoning and decision-making abilities. To address the complexity of dynamic social interactions (Li et al., 2024d), prior research employs RL methods to explore potential behaviors and evaluate different states (Seo & Lee, 2017; Wen et al., 2019). Additionally, some studies introduce modular frameworks to improve strategic reasoning in complex scenarios. For example, ReTA (Duan et al., 2024) uses LLM-based modules as the main actor, reward actor, and anticipation actor, inspired by minimax game theory. Recent work (Trecsenyi et al., 2025) has also begun exploring role-based multi-agent interactions to enable more sophisticated strategic reasoning. These approaches collectively enhance LLMs' strategic reasoning capabilities in dynamic environments.", + "bbox": [ + 109, + 396, + 883, + 609 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Reward modeling and evaluation as a reasoning task Evaluation, whether as an end goal or a component of a larger reasoning system, remains a significant challenge. While using PRMs to enhance reasoning abilities is popular during both inference and training, training these models requires extensive step-by-step annotations (Lightman et al., 2024). 
To address this, recent approaches have introduced automated feedback mechanisms, such as tree search (Wang et al., 2024g; Chen et al., 2024a; Setlur et al., 2024a; Luo et al., 2024c; Wang et al., 2024l) or, less frequently, LLM-as-judge (Zhang et al., 2025b). Although these methods avoid human preference annotations, they often rely on trajectories sampled from a fixed policy model, which may not align well with the problem distribution. This misalignment leads to poor generalization, as highlighted by Zheng et al. (2024). Consequently, the next frontier in reward modeling will need to combine automated data collection with diverse data sources to achieve annotation-efficient generalization.", + "bbox": [ + 109, + 630, + 883, + 781 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "While reasoning in LLM-as-judges is not explicitly addressed, recent training and inference techniques have drawn from established methods for improving reasoning. Judge-based assessment inherently involves a finite set of outcomes (e.g., A or B for pairwise judgments or 1-5 for single ratings), making it suitable for self-consistency decoding (Kim et al., 2024b). More advanced inference-time approaches, such as multi-judge or multi-round discussions (Li et al., 2023d; Chan et al., 2023; Verga et al., 2024; Yu et al., 2024d), self-rationalization (Trivedi et al., 2024), or sequential escalation (Jung et al., 2024), have been proposed. 
Concurrently, training-time solutions for LLM-as-judges focus on distilling chain-of-thought judgments from larger teacher models and fine-tuning smaller judges via supervised fine-tuning (Wang et al., 2023g; Li et al., 2023b; Kim et al., 2023; 2024c; Vu et al., 2024) or preference optimization (Hu et al., 2024; Wang et al.,", + "bbox": [ + 109, + 787, + 883, + 925 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "2024f; Ye et al., 2024; Saad-Falcon et al., 2024; Deshpande et al., 2024; Wang et al., 2024j). Despite these advancements, such models still struggle in reasoning-intensive domains (Tan et al., 2024; Zhou et al., 2025b; Xu et al., 2025b), whereas stronger reasoning models have outperformed specialized judge models in more difficult evaluation settings (Xu et al., 2025a). In all, recent benchmarking results highlight that developing reasoning-specific judges remains an open and challenging research area.", + "bbox": [ + 109, + 103, + 883, + 180 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "6.2 Open Challenges", + "text_level": 1, + "bbox": [ + 112, + 196, + 284, + 213 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Despite the trends observed in Section 6.1, several challenges remain. First, how can we effectively evaluate both the reasoning outcome and the reasoning chain? (Section 6.2.1). Second, do we truly understand reasoning? Does the reasoning chain generated by next-token sampling faithfully reflect the internal reasoning process of an LLM, or is it merely imitating its training data? (Section 6.2.2). Third, training of LLM reasoning system is still largely hindered by substantial data requirements, which include both more challenging questions and the corresponding outcome labels. 
This not only affects the end-to-end reasoner training, but also limits our exploration in building stronger reward models to facilitate inference time scaling (Section 6.2.3).", + "bbox": [ + 109, + 224, + 883, + 347 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "6.2.1 Evaluating Reasoning", + "text_level": 1, + "bbox": [ + 112, + 363, + 333, + 378 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "As language models and agentic systems tackle increasingly complex tasks, evaluating their performance becomes equally challenging. Currently, progress in LLM reasoning is measured by outcome performance on fixed benchmarks (e.g., MATH (Hendrycks et al., 2021b)). However, relying solely on outcomes to verify reasoning correctness may be insufficient, as a correct final answer does not guarantee a logically sound reasoning chain (Hao et al., 2024a). Prior work has shown that LLMs often produce unfaithful reasoning chains, even when the final answers are correct (Wiegreffe et al., 2022; Lyu et al., 2023; Wang et al., 2023b).", + "bbox": [ + 109, + 388, + 883, + 479 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Evaluating reasoning beyond outcomes remains an open and challenging problem. Early approaches relied on human annotators to assess the quality of generated explanations (Camburu et al., 2018; Rajani et al., 2019), focusing on whether the reasoning could lead to the same predictions. To scale this idea, follow-up works (Wiegreffe et al., 2020; Hase et al., 2020) used trained models as simulators to evaluate the alignment between generated reasoning and final predictions. When human-annotated reasoning chains are available, some studies leverage traditional NLG metrics to measure overlap between human- and model-generated explanations (Clinciu et al., 2021). 
Others propose reasoning-specific metrics to assess aspects like coherency, redundancy, factuality (Golovneva et al., 2022), informativeness (Chen et al., 2022), robustness (Wang & Zhao, 2024), and contextual faithfulness (Ming et al., 2025). Under the LLM-as-Judge paradigm, recent works prompt powerful LLMs like GPT-4 to directly evaluate reasoning chains generated by other models (Hao et al., 2024a; Sun et al., 2024b). However, as reasoning tasks grow in complexity, evaluation becomes increasingly difficult, even for frontier models—if a model cannot perform a task, how can it judge if the task is done correctly? Thus, developing robust and accurate methods to evaluate reasoning beyond outcomes remains a significant and unresolved challenge.", + "bbox": [ + 109, + 487, + 883, + 698 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "6.2.2 Understanding Reasoning", + "text_level": 1, + "bbox": [ + 112, + 715, + 362, + 729 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Recent research on understanding LLM reasoning has advanced along two complementary paths: empirical studies that evaluate and analyze performance through carefully designed and controlled experiments, and formal analyses that introduce new frameworks to systematically explore the underlying mechanisms of how LLMs reason.", + "bbox": [ + 109, + 741, + 883, + 801 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Empirical analysis of reasoning Recent LLMs exhibit strong performance across diverse tasks, suggesting some level of reasoning capability. However, whether these skills are general and transferable or merely specialized for tasks encountered during pretraining remains an open and debated question. 
To address this, several empirical studies have sought to understand and enhance LLM capabilities across various reasoning forms: abstractive reasoning (Wu et al., 2024a; He & Lu, 2024), compositional reasoning (Bhargava & Ng, 2022; Li et al., 2024g), inductive reasoning (Yang et al., 2024f; Han et al., 2024b), abductive reasoning (Jung et al., 2022; Pareschi, 2023), deductive reasoning (Poesia et al., 2024; Seals & Shalin, 2024; Feng et al.,", + "bbox": [ + 109, + 818, + 880, + 925 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "2024a), logical reasoning (Wan et al., 2024b; Han et al., 2024a; Xu et al., 2025c), commonsense reasoning (Lin et al., 2021; Liang et al., 2023a; Sun et al., 2024a), math reasoning (Ahn et al., 2024; Mirzadeh et al., 2025), and social reasoning (Gandhi et al., 2023). Notably, Arkoudas (2023) qualitatively evaluate GPT-4 on 21 diverse reasoning problems, concluding that despite occasional analytical success, GPT-4 remains incapable of true reasoning. Similarly, Wu et al. (2024a) empirically investigate abstractive reasoning and find that while LLMs achieve nontrivial performance on counterfactual tasks, their performance consistently degrades compared to default conditions, indicating reliance on narrow, non-transferable procedures. Mondorf & Plank (2024) provide a comprehensive survey on recent evaluations of LLM reasoning abilities.", + "bbox": [ + 109, + 103, + 883, + 224 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Beyond assessing LLM reasoning capabilities, there is growing interest in evaluating how test-time scaling methods enhance reasoning. The empirical success of CoT prompting has spurred extensive research into its mechanisms. Wang et al. (2023a) and Madaan et al. 
(2023a) investigate the role of demonstrations, finding that LLMs prioritize pattern consistency over accuracy and exhibit robustness to invalid demonstrations—particularly in mathematical reasoning, where incorrect equations often do not hinder performance. They also emphasize the importance of relevant rationales and logical progression in CoT prompts. Additionally, Madaan et al. (2023a) conclude that CoT aids models by supplementing missing information, such as commonsense knowledge, and reinforcing task understanding. From a modeling perspective, Dutta et al. (2024) analyze CoT through neural mechanisms, revealing that LLMs process input context and generated CoT via parallel pathways. They find that early layers (e.g., layers 1-16 in Llama-2 7B (Touvron et al., 2023)) rely on pretraining knowledge, while later layers specialize in in-context learning, with answer-writing heads emerging in the final layers. From a task perspective, Sprague et al. (2024a) conduct a meta-analysis of 100 CoT papers, showing that CoT significantly improves performance on mathematical, logical, and algorithmic reasoning tasks but offers minimal gains for non-symbolic tasks. Their analysis suggests that CoT excels in computational steps but struggles with tool-augmented reasoning. On the training front, Gao et al. (2024a); Zhang et al. (2025b); Yeo et al. (2025) explore key supervised fine-tuning (SFT) and reinforcement learning (RL) factors that optimize LLM training strategies for enhancing CoT reasoning.", + "bbox": [ + 116, + 231, + 883, + 489 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Formal analysis of reasoning There is increasing interest in formal analyses, which use structured and logical proofs to systematically evaluate and improve the reasoning capabilities of LLMs. Han et al. (2022) introduce FOLIO, a dataset designed to assess models' ability to derive correct conclusions from premises using first-order logic reasoning. 
Similarly, Saparov & He (2023) develop a benchmark evaluating LLMs on symbolic ontologies, revealing that models often struggle with proof planning and rely on knowledge retrieval rather than genuine reasoning. These findings highlight the potential of neurosymbolic methods to better understand LLM reasoning. Recent work also explores formal analysis techniques to enhance LLM reasoning. For instance, Pan et al. (2023) use LLMs to translate natural language problems into symbolic formulations, which are then processed by deterministic symbolic solvers for inference. (Li et al., 2025b) demonstrate the promise of leveraging LLMs' symbolic reasoning for mathematical problem-solving. Other studies focus on domain-specific reasoning: Fang et al. (2024a) propose an LLM-based agent for text-based games, designed to tackle symbolic challenges and achieve in-game objectives, while Nahid & Rafiei (2024) introduce a framework to enhance LLMs' symbolic reasoning by normalizing web tables. These studies reveal LLMs' limitations in structured reasoning while emphasizing the value of integrating formal analysis to strengthen their capabilities.", + "bbox": [ + 109, + 510, + 883, + 737 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Theoretical analysis of ICL and CoT reasoning The success of in-context learning (ICL) and CoT prompting in enhancing LLM reasoning has sparked significant interest in understanding their underlying mechanisms from theoretical perspectives. Extensive prior studies on ICL suggest that transformer-based in-context learners effectively implement various learning algorithms, encoding implicit, context-dependent models for generation within their hidden activations—models that can be trained through demonstrations as these activations are computed. For instance, Akyurek et al. (2022) investigate this hypothesis in the context of linear regression models, while Von Oswald et al. (2023) and Dai et al. 
(2023) explore how transformer-based in-context learners function as meta-optimizers, effectively learning models via gradient descent during their forward pass. From a Bayesian inference perspective, Xie et al. (2022); Zhang et al. (2023) and Wang et al. (2023e) demonstrate that transformer-based in-context learners can achieve the Bayes-optimal predictor when demonstrations are selected based on a shared latent concept variable, such", + "bbox": [ + 109, + 757, + 883, + 924 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "as format or task information, even in the presence of distribution mismatches between demonstrations and training data. Additionally, Elhage et al. (2021); Olsson et al. (2022) examine ICL through the concept of \"induction heads\" - attention heads that implement a simple algorithm to complete tasks, providing evidence that induction heads may underlie much of the in-context learning observed in transformer-based models.", + "bbox": [ + 109, + 103, + 883, + 164 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "The body of work exploring the theoretical insights into CoT mechanisms remains relatively limited, with most studies focusing on the expressiveness of LLMs when using CoT. A pioneering study by Feng et al. (2023a) investigates LLMs with CoT for solving mathematical and decision-making problems. Using circuit complexity theory (Arora & Barak, 2009), they demonstrate that bounded-depth transformers cannot solve basic arithmetic or equation tasks unless the model size grows super-polynomially. In contrast, they prove that constant-size models can solve these tasks, along with a wide range of decision-making problems such as Dynamic Programming, by generating CoT derivations in a common mathematical language. Li et al. 
(2024h) extend these findings, providing a tighter upper bound on the expressiveness of constant-depth transformers with CoT. However, these studies do not explore how the length of a CoT affects model reasoning power. To address this gap, Merrill & Sabharwal (2024) find that a logarithmic number of intermediate steps (relative to input length) offers only marginal gains over standard transformers, while a linear number of steps under the assumption of projected pre-norm (a slight generalization of standard pre-norm) enables the recognition of all regular languages. Furthermore, polynomially many steps, combined with generalized pre-norm, allow transformers to recognize exactly the class of polynomial-time solvable problems.", + "bbox": [ + 109, + 171, + 883, + 383 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "6.2.3 Data Challenges in Advancing Reasoning Capabilities", + "text_level": 1, + "bbox": [ + 109, + 404, + 570, + 421 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Challenges in scaling question and outcome supervision for RL As discussed earlier, development trends in both general and task-specific domains are converging, with a focus on employing end-to-end RL to minimize inductive bias and push the boundaries of intelligence. Frontier models now incorporate competition-level problems annually for training, as these represent the most challenging tasks and are annotated with high-quality answers by human experts. However, we are nearing the limits of available human-annotated data, raising the question of whether methods beyond human labeling can enable the continuous scaling of RL. 
This challenge is particularly relevant in domains where prompts are not easily verifiable, such as open-ended generation, software engineering, and most agentic tasks.", + "bbox": [ + 109, + 431, + 883, + 555 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Challenges in reward modeling Early studies have investigated the feasibility of process supervision (Lightman et al., 2024) and its effectiveness in inference-time scaling (Snell et al., 2025). However, its high annotation costs and ambiguous definition—particularly in long CoT scenarios where self-reflection is encouraged—have limited its adoption in large-scale reinforcement learning. Despite these challenges, the key advantage of accurate process supervision is its ability to reduce hallucinations, making it essential for automated reasoning and knowledge discovery. Additionally, as discussed in Section 4.2, the training paradigm for reward models is closely tied to that of reasoning models. This raises concerns about whether allocating the same annotation budget directly to reasoning models could lead to more stable and general improvements, potentially limiting the gains achievable through inference-time scaling.", + "bbox": [ + 109, + 575, + 883, + 712 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 112, + 736, + 246, + 752 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "In this work, we provide a timely and comprehensive survey on LLM reasoning. We first formalize the goal of LLM reasoning and consolidate past research by categorizing reasoning techniques along two dimensions: regimes and architectures. Within each of these dimensions, we review both input and output perspectives in detail. Our review highlights emerging trends, including the shift from inference-time scaling to learning-to-reason regimes, and the transition from standalone models to agentic systems. 
We also review and compare a wide range of learning algorithms, including supervised fine-tuning and reinforcement learning, as well as the training of reasoners and training of verifiers. Despite these advancements, challenges remain in evaluating reasoning and understanding real reasoning mechanisms as well as addressing data challenges in advancing reasoning capabilities. We encourage future research to further explore these trends, such as inference-aware learning-to-reason and automated multi-agent design, to enhance LLM reasoning.", + "bbox": [ + 109, + 773, + 883, + 925 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 38 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 112, + 103, + 253, + 119 + ], + "page_idx": 39 + }, + { + "type": "ref_text", + "text": "We thank M Saiful Bari, Semih Yavuz and Yingbo Zhou for helpful discussions.", + "bbox": [ + 112, + 128, + 686, + 143 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 162, + 215, + 178 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "American invitational mathematics examination. Mathematical Association of America, 2025. https://maa.org/maa-invitational-competitions/.", + "Rishabh Agarwal, Avi Singh, Lei Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, et al. Many-shot in-context learning. Advances in Neural Information Processing Systems, 37:76930-76966, 2024.", + "Sweta Agrawal, Chunting Zhou, Mike Lewis, Luke Zettlemoyer, and Marjan Ghazvininejad. In-context examples selection for machine translation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 8857-8873, Toronto, Canada, July 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023-findings-acl.564. URL https://aclanthology.org/2023-findings-acl.564/.", + "Arash Ahmadian, Chris Cremer, Matthias Gallé, Marzieh Fadaee, Julia Kreutzer, Olivier Pietquin, Ahmet Üstün, and Sara Hooker. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. arXiv preprint arXiv:2402.14740, 2024.", + "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/.", + "Afra Feyza Akyürek, Ekin Akyürek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. Rl4f: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv preprint arXiv:2305.08844, 2023.", + "Ekin Akyürek, Dale Schuurmans, Jacob Andreas, Tengyu Ma, and Denny Zhou. What learning algorithm is in-context learning? investigations with linear models. In The Eleventh International Conference on Learning Representations, 2022.", + "AlphaProof and AlphaGeometry teams. AI achieves silver-medal standard solving international mathematical olympiad problems. https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/, 2024.", + "Konstantine Arkoudas. Gpt-4 can't reason. arXiv preprint arXiv:2308.03762, 2023.", + "Sanjeev Arora and Boaz Barak. Computational complexity: a modern approach. Cambridge University Press, 2009.", + "Krishna Aswani, Huilin Lu, Pranav Patankar, Priya Dhalwani, Xue Tan, Jayant Ganeshmohan, and Simon Lacasse. Auto-evolve: Enhancing large language model's performance via self-reasoning framework. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 13243-13257, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.774. URL https://aclanthology.org/2024-findings-emnlp.774/.", + "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021.", + "Mohammad Gheshlaghi Azar, Zhaohan Daniel Guo, Bilal Piot, Remi Munos, Mark Rowland, Michal Valko, and Daniele Calandriello. A general theoretical paradigm to understand learning from human preferences. In International Conference on Artificial Intelligence and Statistics, pp. 4447-4455. PMLR, 2024." + ], + "bbox": [ + 114, + 186, + 883, + 924 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q Jiang, Jia Deng, Stella Biderman, and Sean Welleck. LLemma: An open language model for mathematics. In International Conference on Learning Representations (ICLR), 2024.", + "Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022.", + "Ananth Balashankar, Ziteng Sun, Jonathan Berant, Jacob Eisenstein, Michael Collins, Adrian Hutter, Jong Lee, Chirag Nagpal, Flavien Prost, Aradhana Sinha, Ananda Theertha Suresh, and Ahmad Beirami. Infalign: Inference-aware language model alignment. CoRR, abs/2412.19792, 2024. doi: 10.48550/ARXIV.2412.19792. 
URL https://doi.org/10.48550/arXiv.2412.19792.", + "Bruno Barras, Samuel Boutin, Cristina Cornes, Judicael Courant, Jean-Christophe Filliatre, Eduardo Gimenez, Hugo Herbelin, Gerard Huet, Cesar Munoz, Chetan Murthy, et al. The Coq proof assistant reference manual: Version 6.1. PhD thesis, Inria, 1997.", + "Richard Bellman. Dynamic programming and stochastic control processes. Information and Control, 1 (3):228-239, 1958. ISSN 0019-9958. doi: https://doi.org/10.1016/S0019-9958(58)80003-0. URL https://www.sciencedirect.com/science/article/pii/S0019995858800030.", + "Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22.", + "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024.", + "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025.", + "Prajjwal Bhargava and Vincent Ng. Commonsense knowledge reasoning and generation with pre-trained language models: A survey. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 12317-12325, 2022.", + "Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024. URL https://arxiv.org/pdf/2412.09078.", + "Vadim Borisov, Kathrin Sessler, Tobias Leemann, Martin Pawelczyk, and Gjergji Kasneci. Language models are realistic tabular data generators. 
In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=cEygmmQNOeI.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024.", + "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners, 2020. URL https://arxiv.org/abs/2005.14165." + ], + "bbox": [ + 114, + 102, + 883, + 924 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-snli: Natural language inference with natural language explanations. Advances in Neural Information Processing Systems, 31, 2018.", + "Lorenzo Canese, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, Marco Re, and Sergio Spanò. Multi-agent reinforcement learning: A review of challenges and applications. Applied Sciences, 11(11):4948, 2021.", + "Yihan Cao, Shuyi Chen, Ryan Liu, Zhiruo Wang, and Daniel Fried. API-assisted code generation for question answering on varied table structures. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 14536-14548, Singapore, December 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.emnlp-main.897. URL https://aclanthology.org/2023.emnlp-main.897/.", + "Stephen Casper, Xander Davies, Claudia Shi, Thomas Krendl Gilbert, Jérémy Scheurer, Javier Rando, Rachel Freedman, Tomasz Korbak, David Lindner, Pedro Freire, et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv preprint arXiv:2307.15217, 2023.", + "Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. Chateval: Towards better llm-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201, 2023.", + "Edward Y Chang. Socrasynth: Multi-llm reasoning with conditional statistics. arXiv preprint arXiv:2402.06634, 2024.", + "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. Fireact: Toward language agent fine-tuning. CoRR, abs/2310.05915, 2023a. doi: 10.48550/ARXIV.2310.05915. URL https://doi.org/10.48550/arXiv.2310.05915.", + "Bei Chen, Fengji Zhang, Anh Nguyen, Daoguang Zan, Zeqi Lin, Jian-Guang Lou, and Weizhu Chen. Codet: Code generation with generated tests. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=ktrw68Cmu9c.", + "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: process supervision without process. arXiv preprint arXiv:2405.03553, 2024a.", + "Hanjie Chen, Faeze Brahman, Xiang Ren, Yangfeng Ji, Yejin Choi, and Swabha Swayamdipta. Information-theoretic evaluation of free-text rationales with conditional $\\mathcal{V}$ -information. In Workshop on Trustworthy and Socially Responsible Machine Learning, NeurIPS 2022, 2022.", + "Justin Chih-Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse llms. arXiv preprint arXiv:2309.13007, 2023c.", + "Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. 
Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning, 2024b. URL https://arxiv.org/abs/2409.12147.", + "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pei Chen, Boran Han, and Shuai Zhang. Comm: Collaborative multi-agent, multi-reasoning-path prompting for complex problem solving. arXiv preprint arXiv:2404.17729, 2024c.", + "Wei-Lin Chen, Cheng-Kuang Wu, Yun-Nung Chen, and Hsin-Hsi Chen. Self-ICL: Zero-shot in-context learning with self-generated demonstrations. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 15651–15662, Singapore, December 2023d. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.968. URL https://aclanthology.org/2023.emnlp-main.968/.", + "Wenhu Chen. 
Large language models are few(1)-shot table reasoners. In Andreas Vlachos and Isabelle Augenstein (eds.), Findings of the Association for Computational Linguistics: EACL 2023, pp. 1120-1130, Dubrovnik, Croatia, May 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-eacl.83. URL https://aclanthology.org/2023-findings-eacl.83/.", + "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023e. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd.", + "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024d.", + "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. Agent-flan: Designing data and methods of effective agent tuning for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics, ACL 2024, Bangkok, Thailand and virtual meeting, August 11-16, 2024, pp. 9354-9366. Association for Computational Linguistics, 2024e. URL https://doi.org/10.18653/v1/2024-findings-acl.557.", + "Zihan Chen, Song Wang, Zhen Tan, Jundong Li, and Cong Shen. Maple: Many-shot adaptive pseudo-labeling for in-context learning, 2025. URL https://arxiv.org/abs/2505.16225.", + "Zhoujun Cheng, Tianbao Xie, Peng Shi, Chengzu Li, Rahul Nadkarni, Yushi Hu, Caiming Xiong, Dragomir Radev, Mari Ostendorf, Luke Zettlemoyer, Noah A. Smith, and Tao Yu. Binding language models in symbolic languages. In The Eleventh International Conference on Learning Representations, 2023. 
URL https://openreview.net/forum?id=1H1PV42cbF.", + "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. CoRR, abs/2412.15287, 2024. doi: 10.48550/ARXIV.2412.15287. URL https://doi.org/10.48550/arXiv.2412.15287.", + "Miruna Clinciu, Arash Eshghi, and Helen Hastie. A study of automatic metrics for the evaluation of natural language explanations. arXiv preprint arXiv:2103.08545, 2021.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Jonathan Cook, Tim Rocktäschel, Jakob Foerster, Dennis Aumiller, and Alex Wang. Ticking all the boxes: Generated checklists improve llm evaluation and generation. arXiv preprint arXiv:2410.03608, 2024.", + "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chris Cummins, Volker Seeker, Dejan Grubisic, Mostafa Elhoushi, Youwei Liang, Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Kim Hazelwood, Gabriel Synnaeve, et al. Large language models for compiler optimization. arXiv preprint arXiv:2309.07062, 2023. 
URL https://arxiv.org/abs/2309.07062.", + "Damai Dai, Yutao Sun, Li Dong, Yaru Hao, Shuming Ma, Zhifang Sui, and Furu Wei. Why can GPT learn in context? language models secretly perform gradient descent as meta-optimizers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 4005–4019, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.247. URL https://aclanthology.org/2023-findings-acl.247/.", + "Ning Dai, Zheng Wu, Renjie Zheng, Ziyun Wei, Wenlei Shi, Xing Jin, Guanlin Liu, Chen Dun, Liang Huang, and Lin Yan. Process supervision-guided policy optimization for code generation. arXiv preprint arXiv:2410.17621, 2024.", + "Mehul Damani, Idan Shenfeld, Andi Peng, Andreea Bobu, and Jacob Andreas. Learning how hard to think: Input-adaptive allocation of LM computation. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=6qUUgw9bAZ.", + "Debrup Das, Debopriyo Banerjee, Somak Aditya, and Ashish Kulkarni. Mathsensei: A tool-augmented large language model for mathematical reasoning. arXiv preprint arXiv:2402.17231, 2024.", + "Leonardo De Moura, Soonho Kong, Jeremy Avigad, Floris Van Doorn, and Jakob von Raumer. The lean theorem prover (system description). In _Automated Deduction-CADE-25: 25th International Conference on Automated Deduction_, Berlin, Germany, August 1-7, 2015, Proceedings 25, pp. 378-388. Springer, 2015.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou Y. X. 
Zhu Yanhong Xu Yanping Huang Yaohui Li Yi ZhengYuchen ZhuYunxian Ma Ying Tang Yukun Zha Yuting YanZ.Z.Ren Zehui Ren,Zhangli ShaZhe FuZhean XuZhenda Xie Zhengyan Zhang,Zhenwen Hao,Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang.Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948.", + "Shumin Deng, Ningyu Zhang, Nay Oo, and Bryan Hooi. Towards a unified view of answer calibration for multi-step reasoning. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao (eds.), Proceedings of the 2nd Workshop on Natural" + ], + "bbox": [ + 114, + 102, + 883, + 926 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Language Reasoning and Structured Explanations (@ACL 2024), pp. 25-38, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.3/.", + "Yihe Deng, Weitong Zhang, Zixiang Chen, and Quanquan Gu. Rephrase and respond: Let large language models ask better questions for themselves. arXiv preprint arXiv:2311.04205, 2023a.", + "Yuntian Deng, Kiran Prasad, Roland Fernandez, Paul Smolensky, Vishrav Chaudhary, and Stuart M. Shieber. Implicit chain of thought reasoning via knowledge distillation. CoRR, abs/2311.01460, 2023b.", + "Yuntian Deng, Yejin Choi, and Stuart M. Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. CoRR, abs/2405.14838, 2024b. URL https://doi.org/10.48550/arXiv.2405.14838.", + "Darshan Deshpande, Selvan Sunitha Ravi, Sky CH-Wang, Bartosz Mielczarek, Anand Kannappan, and Rebecca Qian. Glider: Grading llm interactions and decisions using explainable ranking. 
arXiv preprint arXiv:2412.14140, 2024.", + "Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, Kashun Shum, and Tong Zhang. RAFT: reward ranked finetuning for generative foundation model alignment. Trans. Mach. Learn. Res., 2023, 2023. URL https://openreview.net/forum?id=m7p507zb1Y.", + "Haoyu Dong and Zhiruo Wang. Large language models for tabular data: Progresses and future directions. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, pp. 2997-3000, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 97898400704314. doi: 10.1145/3626772.3661384. URL https://doi.org/10.1145/3626772.3661384.", + "Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Baobao Chang, et al. A survey on in-context learning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 1107-1128, 2024.", + "Jiri Dostál. Theory of problem solving. Procedia - Social and Behavioral Sciences, 174:2798-2805, 2015. ISSN 1877-0428. doi: https://doi.org/10.1016/j.sbspro.2015.01.970. URL https://www.sciencedirect.com/science/article/pii/S1877042815010290. International Conference on New Horizons in Education, INTE 2014, 25-27 June 2014, Paris, France.", + "Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, Zhiheng Xi, Yuhao Zhou, Tao Ji, Rui Zheng, Qi Zhang, Xuanjing Huang, and Tao Gui. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. CoRR, abs/2402.01391, 2024a. doi: 10.48550/ARXIV.2402.01391. URL https://doi.org/10.48550/arXiv.2402.01391.", + "Zi-Yi Dou, Cheng-Fu Yang, Xueqing Wu, Kai-Wei Chang, and Nanyun Peng. Re-ReST: Reflection-reinforced self-training for language agents. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 15394-15411, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.861. URL https://aclanthology.org/2024.emnlp-main.861/.", + "Dheeru Dua, Shivanshu Gupta, Sameer Singh, and Matt Gardner. Successive prompting for decomposing complex questions. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1251-1265, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.81. URL https://aclanthology.org/2022.emnlp-main.81.", + "Jinhao Duan, Shiqi Wang, James Diffenderfer, Lichao Sun, Tianlong Chen, Bhavya Kailkhura, and Kaidi Xu. Reta: Recursively thinking ahead to improve the strategic reasoning of large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2232-2246, 2024." + ], + "bbox": [ + 112, + 102, + 885, + 926 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tom Duenas and Diana Ruiz. The path to superintelligence: A critical analysis of openai's five levels of ai progression. Research Gate, 2024.", + "Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. 
URL https://openreview.net/forum?id=uHLDkQVtyC.", + "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 1(1):12, 2021.", + "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate. arXiv preprint arXiv:2411.00053, 2024.", + "Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024.", + "Meng Fang, Shilong Deng, Yudi Zhang, Zijing Shi, Ling Chen, Mykola Pechenizkiy, and Jun Wang. Large language models are neurosymbolic reasoners. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17985-17993, Mar. 2024a. doi: 10.1609/aaai.v38i16.29754. URL https://ojs.aaai.org/index.php/AAAI/article/view/29754.", + "Xi Fang, Weijie Xu, Fiona Anting Tan, Ziqing Hu, Jiani Zhang, Yanjun Qi, Srinivasan H. Sengamedu, and Christos Faloutsos. Large language models (LLMs) on tabular data: Prediction, generation, and understanding - a survey. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=IZnrCGF9WI.", + "Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: a theoretical perspective. Advances in Neural Information Processing Systems, 36:70757-70798, 2023a.", + "Jiazhan Feng, Ruochen Xu, Junheng Hao, Hiteshi Sharma, Yelong Shen, Dongyan Zhao, and Weizhu Chen. Language models can be deductive solvers. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Findings of the Association for Computational Linguistics: NAACL 2024, pp. 4026-4042, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-nacl.254. 
URL https://aclanthology.org/2024 findings-nacl.254/.", + "Xiachong Feng, Longxu Dou, Ella Li, Qinghao Wang, Haochuan Wang, Yu Guo, Chang Ma, and Lingpeng Kong. A survey on large language model-based social agents in game-theoretic scenarios, 2024b. URL https://arxiv.org/abs/2412.03920.", + "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179, 2023b.", + "Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=9ZxnPZGmPU.", + "Emily First, Markus N Rabe, Talia Ringer, and Yuriy Brun. Baldur: Whole-proof generation and repair with large language models. In Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1229-1241, 2023.", + "Yann Fleureau, Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, and Kashif Rasul. How NuminaMath won the 1st AIMO Progress Prize. https://huggingface.co/blog/winning-aimo-progress-prize, 2024.", + "Adam Fourney, Gagan Bansal, Hussein Mozannar, Cheng Tan, Eduardo Salinas, Friederike Niedtner, Grace Proebsting, Griffin Bassman, Jack Gerrits, Jacob Alber, et al. Magentic-one: A generalist multi-agent system for solving complex tasks. arXiv preprint arXiv:2411.04468, 2024." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 488, + 948, + 509, + 959 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Adrian Garret Gabriel, Alaa Alameer Ahmad, and Shankar Kumar Jeyakumar. 
Advancing agentic systems: Dynamic task decomposition, tool integration and evaluation using novel metrics and dataset, 2024. URL https://arxiv.org/abs/2410.22457.", + "Kanishk Gandhi, Jan-Philipp Franken, Tobias Gerstenberg, and Noah Goodman. Understanding social reasoning in language models with language models. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=8bqjirgxQM.", + "Deep Ganguli, Liane Lovitt, Jackson Kernion, Amanda Askell, Yuntao Bai, Saurav Kadavath, Ben Mann, Ethan Perez, Nicholas Schiefer, Kamal Ndousse, et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv preprint arXiv:2209.07858, 2022.", + "Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024a.", + "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024b.", + "Olga Golovneva, Moya Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Roscoe: A suite of metrics for scoring step-by-step reasoning. arXiv preprint arXiv:2212.07919, 2022.", + "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. URL https://proceedings.neurips.cc/paper_files/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf.", + "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. 
In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Ep0TjVoap.", + "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=ph04CRkPdC.", + "Nate Gruver, Marc Anton Finzi, Shikai Qiu, and Andrew Gordon Wilson. Large language models are zero-shot time series forecasters. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=md68e8iZK1.", + "Zhengyao Gu, Henry Peng Zou, Yankai Chen, Aiwei Liu, Weizhi Zhang, and Philip S Yu. Semi-supervised in-context learning: A baseline study. arXiv preprint arXiv:2503.03062, 2025.", + "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv: 2501.04519, 2025.", + "Jiaxian Guo, Bo Yang, Paul Yoo, Bill Yuchen Lin, Yusuke Iwasawa, and Yutaka Matsuo. Suspicion-agent: Playing imperfect information games with theory of mind aware gpt-4. arXiv preprint arXiv:2309.17277, 2023.", + "Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=ZG3RaNIs08.", + "Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Large language model based multi-agents: A survey of progress and challenges. arXiv preprint arXiv:2402.01680, 2024b." 
+ ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zakaria Hammane, Fatima-Ezzahraa Ben-Bouazza, and Abdelhadi Fennan. Selfrewarddrag: Enhancing medical reasoning with retrieval-augmented generation and self-evaluation in large language models. In 2024 International Conference on Intelligent Systems and Computer Vision (ISCV), pp. 1-8. IEEE, 2024.", + "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, et al. Folio: Natural language reasoning with first-order logic. arXiv preprint arXiv:2209.00840, 2022.", + "Simeng Han, Aaron Yu, Rui Shen, Zhenting Qi, Martin Riddell, Wenfei Zhou, Yujie Qiao, Yilun Zhao, Semih Yavuz, Ye Liu, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Dragomir Radev, Rex Ying, and Arman Cohen. P-FOLIO: Evaluating and improving logical reasoning with abundant human-written reasoning chains. In Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 16553-16565, Miami, Florida, USA, November 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.966. URL https://aclanthology.org/2024-findings-emnlp.966/.", + "Simon Jerome Han, Keith J Ransom, Andrew Perfors, and Charles Kemp. Inductive reasoning in humans and large language models. Cognitive Systems Research, 83:101155, 2024b.", + "Shibo Hao, Yi Gu, Haodi Ma, Joshua Jiahua Hong, Zhen Wang, Daisy Zhe Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 8154-8173. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.507. 
URL https://doi.org/10.18653/v1/2023.emnlp-main.507.", + "Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyuan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, et al. Llm reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. arXiv preprint arXiv:2404.05221, 2024a.", + "Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. CoRR, abs/2412.06769, 2024b. URL https://doi.org/10.48550/arXiv.2412.06769.", + "Peter Hase, Shiyue Zhang, Harry Xie, and Mohit Bansal. Leakage-adjusted simulatability: Can models generate non-trivial explanations of their behavior in natural language? arXiv preprint arXiv:2010.04119, 2020.", + "Michael Hassid, Tal Remez, Jonas Gehring, Roy Schwartz, and Yossi Adi. The larger the better? improved llm code-generation via budget reallocation. arXiv preprint arXiv:2404.00725, 2024.", + "Alex Havrilla, Yuqing Du, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, Sainbayar Sukhbaatar, and Roberta Raileanu. Teaching large language models to reason with reinforcement learning. arXiv preprint arXiv:2403.04642, 2024.", + "Jiabang He, Lei Wang, Yi Hu, Ning Liu, Hui Liu, Xing Xu, and Heng Tao Shen. Icl-d3ie: In-context learning with diverse demonstrations updating for document information extraction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19485-19494, 2023.", + "Jinwei He and Feng Lu. Causejudger: Identifying the cause with llms for abductive logical reasoning. arXiv preprint arXiv:2409.05559, 2024.", + "Dan Hendrycks, Steven Basart, Saurav Kadavath, Mantas Mazeika, Akul Arora, Ethan Guo, Collin Burns, Samir Puranik, Horace He, Dawn Song, and Jacob Steinhardt. Measuring coding challenge competence with apps. 
NeurIPS, 2021a.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021b." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. TaPas: Weakly supervised table parsing via pre-training. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4320-4333, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.398. URL https://aclanthology.org/2020.acl-main.398/.", + "Keith J Holyoak. Analogy and relational reasoning. The Oxford handbook of thinking and reasoning, pp. 234-259, 2012. URL https://psycnet.apa.org/record/2012-08871-013.", + "Jiwoo Hong, Noah Lee, and James Thorne. Orpo: Monolithic preference optimization without reference model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 11170-11189, 2024.", + "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023.", + "Xinyi Hou, Yanjie Zhao, Shenao Wang, and Haoyu Wang. Model context protocol (mcp): Landscape, security threats, and future research directions. arXiv preprint arXiv:2503.23278, 2025.", + "Shengran Hu, Cong Lu, and Jeff Clune. Automated design of agentic systems. In The Thirteenth International Conference on Learning Representations, 2025. 
URL https://openreview.net/forum?id=t9U3LW7JVX.", + "Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. arXiv preprint arXiv:2406.18365, 2024.", + "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/.", + "Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=IkmD3fKBPQ.", + "Siming Huang, Tianhao Cheng, J. K. Liu, Jiaran Hao, Liuyihan Song, Yang Xu, J. Yang, J. H. Liu, Chenchen Zhang, Linzheng Chai, Ruifeng Yuan, Zhaoxiang Zhang, Jie Fu, Qian Liu, Ge Zhang, Zili Wang, Yuan Qi, Yinghui Xu, and Wei Chu. Opencoder: The open cookbook for top-tier code large language models. CoRR, abs/2411.04905, 2024b. doi: 10.48550/ARXIV.2411.04905. URL https://doi.org/10.48550/arXiv.2411.04905.", + "Yuncheng Huang, Qianyu He, Yipei Xu, Jiaqing Liang, and Yanghua Xiao. Laying the foundation first? investigating the generalization from atomic skills to complex reasoning tasks, 2024c. URL https:// arxiv.org/abs/2403.09479.", + "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? 
arXiv preprint arXiv:2411.16489, 2024d.", + "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.", + "Michael Huth and Mark Ryan. Logic in computer science: Modelling and reasoning about systems. Cambridge university press., 86, 2004.", + "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livocodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974, 2024." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Harold Jeffreys. An invariant form for the prior probability in estimation problems. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, 186:453-461, 1946. doi: 10.1098/rspa.1946.0056. URL http://doi.org/10.1098/rspa.1946.0056.", + "Albert Q. Jiang, Wenda Li, and Mateja Jamnik. Multi-language diversity benefits autoformalization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=2jjfRm2R6D.", + "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. StructGPT: A general framework for large language model to reason over structured data. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 9237-9251, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.574. URL https://aclanthology.org/2023.emnlp-main.574/.", + "Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. 
Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024b.", + "Fangkai Jiao, Chengwei Qin, Zhengyuan Liu, Nancy Chen, and Shafiq Joty. Learning planning-based reasoning by trajectories collection and process reward synthesizing. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 334-350. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.20.", + "Fangkai Jiao, Zhiyang Teng, Bosheng Ding, Zhengyuan Liu, Nancy F. Chen, and Shafiq Joty. Exploring self-supervised logic-enhanced training for large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 926-941. Association for Computational Linguistics, 2024b.", + "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F. Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. In ICLR. OpenReview.net, 2025.", + "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=VTF8yNQM66.", + "Jaehun Jung, Lianhui Qin, Sean Welleck, Faeze Brahman, Chandra Bhagavatula, Ronan Le Bras, and Yejin Choi. Maieutic prompting: Logically consistent reasoning with recursive explanations. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 
1266-1279, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.82. URL https://aclanthology.org/2022.emnlp-main.82/.", + "Jaehun Jung, Faeze Brahman, and Yejin Choi. Trust or escalate: Llm judges with provable guarantees for human agreement. arXiv preprint arXiv:2407.18370, 2024.", + "Katie Kang, Amrith Setlur, Dibya Ghosh, Jacob Steinhardt, Claire Tomlin, Sergey Levine, and Aviral Kumar. What do learning dynamics reveal about generalization in llm reasoning?, 2024. URL https://arxiv.org/abs/2411.07681.", + "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024.", + "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 49 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zixuan Ke and Bing Liu. Continual learning of natural language processing tasks: A survey, 2023. URL https://arxiv.org/abs/2211.12701.", + "Zixuan Ke, Yijia Shao, Haowei Lin, Tatsuya Konishi, Gyuhak Kim, and Bing Liu. Continual pre-training of language models, 2023. URL https://arxiv.org/abs/2302.03241.", + "Zixuan Ke, Weize Kong, Cheng Li, Mingyang Zhang, Qiaozhu Mei, and Michael Bendersky. Bridging the preference gap between retrievers and llms, 2024. URL https://arxiv.org/abs/2401.06954.", + "Zixuan Ke, Yifei Ming, and Shafiq Joty. Adaptation of large language models. 
In Maria Lomeli, Swabha Swayamdipta, and Rui Zhang (eds.), Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 5: Tutorial Abstracts), pp. 30-37, Albuquerque, New Mexico, May 2025a. Association for Computational Linguistics. ISBN 979-8-89176-193-3. doi: 10.18653/v1/2025.naacl-tutorial.5. URL https://aclanthology.org/2025.naacl-tutorial.5/.", + "Zixuan Ke, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Demystifying domain-adaptive post-training for financial llms. arXiv preprint arXiv:2501.04961, 2025b.", + "Zixuan Ke, Austin Xu, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Mas-zero: Designing multi-agent systems with zero supervision, 2025c. URL https://arxiv.org/abs/2505.14996.", + "Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024, 2022.", + "Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=nGgzQjzaRy.", + "Dongkwan Kim, Junho Myung, and Alice Oh. Salad-bowl-LLM: Multi-culture LLMs by in-context demonstrations from diverse cultures. In Workshop on Socially Responsible Language Modelling Research, 2024a. URL https://openreview.net/forum?id=KsAfPGPZZn.", + "Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangdoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, et al. Prometheus: Inducing fine-grained evaluation capability in language models. 
In The Twelfth International Conference on Learning Representations, 2023.", + "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The biggen bench: A principled benchmark for fine-grained evaluation of language models with language models. arXiv preprint arXiv:2406.05761, 2024b.", + "Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. arXiv preprint arXiv:2405.01535, 2024c.", + "Sunghwan Kim, Dongjin Kang, Taeyoon Kwon, Hyungjoo Chae, Jungsoo Won, Dongha Lee, and Jinyoung Yeo. Evaluating robustness of reward models for mathematical reasoning, 2024d. URL https://arxiv.org/abs/2410.01729.", + "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35:22199-22213, 2022.", + "Wouter Kool, Herke van Hoof, and Max Welling. Buy 4 reinforce samples, get a baseline for free! 2019.", + "Michal Kosinski. Evaluating large language models in theory of mind tasks. Proceedings of the National Academy of Sciences, 121(45):e2405460121, 2024." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 488, + 948, + 506, + 960 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Julia Kreutzer, Artem Sokolov, and Stefan Riezler. Bandit structured prediction for neural sequence-to-sequence learning. arXiv preprint arXiv:1704.06497, 2017.", + "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D. Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M. 
Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal M. P. Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. CoRR, abs/2409.12917, 2024. doi: 10.48550/ARXIV.2409.12917. URL https://doi.org/10.48550/arXiv.2409.12917.", + "Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. CoRR, abs/2406.18629, 2024. doi: 10.48550/ ARXIV.2406.18629. URL https://doi.org/10.48550/arXiv.2406.18629.", + "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tülu 3: Pushing frontiers in open language model post-training. 2024.", + "Qiangfeng Peter Lau, Mong-Li Lee, and Wynne Hsu. Coordination guided reinforcement learning. In AAMAS, pp. 215-222, 2012.", + "Harrison Lee, Samrat Phatale, Hassan Mansoor, Kellie Ren Lu, Thomas Mesnard, Johan Ferret, Colton Bishop, Ethan Hall, Victor Carbune, and Abhinav Rastogi. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. 2023.", + "Sangmin Lee, Minzhi Li, Bolin Lai, Wenqi Jia, Fiona Ryan, Xu Cao, Ozgur Kara, Bikram Boote, Weiyan Shi, Diyi Yang, et al. Towards social ai: A survey on understanding social interactions. arXiv preprint arXiv:2409.15316, 2024.", + "Itay Levy, Ben Boin, and Jonathan Berant. Diverse demonstrations improve in-context compositional generalization. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1401-1422, Toronto, Canada, July 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.acl-long.78. URL https://aclanthology.org/2023.acl-long.78/.", + "Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. arXiv preprint arXiv:2312.04474, 2023a.", + "Haoran Li, Qingxiu Dong, Zhengyang Tang, Chaojun Wang, Xingxing Zhang, Haoyang Huang, Shaohan Huang, Xiaolong Huang, Zeqiang Huang, Dongdong Zhang, Yuxian Gu, Xin Cheng, Xun Wang, Si-Qing Chen, Li Dong, Wei Lu, Zhifang Sui, Benyou Wang, Wai Lam, and Furu Wei. Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064, 2024a. URL https://doi.org/10.48550/arXiv.2402.13064.", + "Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, Hai Zhao, and Pengfei Liu. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470, 2023b.", + "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=bgzUSZ8aeg.", + "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024c.", + "Minzhi Li, Weiyan Shi, Caleb Ziems, and Diyi Yang. Social intelligence data infrastructure: Structuring the present and navigating the future. arXiv preprint arXiv:2403.14659, 2024d." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. 
In Proceedings of the 31st International Conference on Computational Linguistics, pp. 2277-2290, 2025a.", + "Mukai Li, Shansan Gong, Jiangtao Feng, Yiheng Xu, Jun Zhang, Zhiyong Wu, and Lingpeng Kong. Incontext learning with many demonstration examples. arXiv preprint arXiv:2302.04931, 2023c.", + "Ruosen Li, Teerth Patel, and Xinya Du. Prd: Peer rank and discussion improve large language model based evaluations. arXiv preprint arXiv:2307.02762, 2023d.", + "Sheng Li, Jayesh K Gupta, Peter Morales, Ross Allen, and Mykel J Kochenderfer. Deep implicit coordination graphs for multi-agent reinforcement learning. arXiv preprint arXiv:2006.11438, 2020.", + "Xiaonan Li, Kai Lv, Hang Yan, Tianyang Lin, Wei Zhu, Yuan Ni, Guotong Xie, Xiaoling Wang, and Xipeng Qiu. Unified demonstration retriever for in-context learning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4644-4668, Toronto, Canada, July 2023e. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.256. URL https://aclanthology.org/2023.acl-long.256/.", + "Xingxuan Li, Ruochen Zhao, Yew Ken Chia, Bosheng Ding, Shafiq Joty, Soujanya Poria, and Lidong Bing. Chain-of-knowledge: Grounding large language models via dynamic knowledge adapting over heterogeneous sources, 2024e. URL https://arxiv.org/abs/2305.13269.", + "Yang Li, Wenhao Zhang, Jianhong Wang, Shao Zhang, Yali Du, Ying Wen, and Wei Pan. Aligning individual and collective objectives in multi-agent cooperation. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024f. 
URL https://openreview.net/forum?id=2YSHEBRRol.", + "Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel J. Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. Science, 378(6624):1092-1097, December 2022. ISSN 1095-9203. doi: 10.1126/science.abq1158. URL http://dx.doi.org/10.1126/science.abq1158.", + "Zenan Li, Zhaoyu Li, Wen Tang, Xian Zhang, Yuan Yao, Xujie Si, Fan Yang, Kaiyu Yang, and Xiaoxing Ma. Proving olympiad inequalities by synergizing LLMs and symbolic reasoning. In The Thirteenth International Conference on Learning Representations, 2025b. URL https://openreview.net/forum?id=FiyS0ecSm0.", + "Zhaoyi Li, Gangwei Jiang, Hong Xie, Linqi Song, Defu Lian, and Ying Wei. Understanding and patching compositional reasoning in LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 9668-9688, Bangkok, Thailand, August 2024g. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.576. URL https://aclanthology.org/2024-findings-acl.576/.", + "Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, 2024h. 
URL https://openreview.net/forum?id=3EWTEy9MTM.", + "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang," + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. Holistic evaluation of language models. Transactions on Machine Learning Research, 2023a. ISSN 2835-8856. URL https://openreview.net/forum?id=i04LZibEqW. Featured Certification, Expert Certification.", + "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multi-agent debate. arXiv preprint arXiv:2305.19118, 2023b.", + "Yancheng Liang, Daphne Chen, Abhishek Gupta, Simon Shaolei Du, and Natasha Jaques. Learning to cooperate with humans using generative agents. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=v4dXL3LsGX.", + "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. 
In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=v8LOpN6EOi.", + "Bill Yuchen Lin, Seyeon Lee, Xiaoyang Qiao, and Xiang Ren. Common sense beyond English: Evaluating and improving multilingual language models for commonsense reasoning. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 1274-1287, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.102. URL https://aclanthology.org/2021.acl-long.102/.", + "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. An inner table retriever for robust table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9909–9926, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.551. URL https://aclanthology.org/2023.acl-long.551/.", + "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. LI-RAGE: Late interaction retrieval augmented generation with explicit signals for open-domain table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 1557-1566, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-short.133. URL https://aclanthology.org/2023.acl-short.133/.", + "Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. 
Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024a.", + "Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025a.", + "Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. What makes good in-context examples for GPT-3? In Eneko Agirre, Marianna Apidianaki, and Ivan Vulić (eds.), Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pp. 100–114, Dublin, Ireland and Online, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.deelio-1.10. URL https://aclanthology.org/2022.deelio-1.10/.", + "Liang Liu, Dong Zhang, Shoushan Li, Guodong Zhou, and Erik Cambria. Two heads are better than one: Zero-shot cognitive reasoning via multi-llm knowledge fusion. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, pp. 1462–1472, 2024b.", + "Ryan Liu, Jiayi Geng, Addison J. Wu, Ilia Sucholutsky, Tania Lombrozo, and Thomas L. Griffiths. Mind your step (by step): Chain-of-thought can reduce performance on tasks where thinking makes humans worse, 2024c. URL https://arxiv.org/abs/2410.21333." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tianyang Liu, Fei Wang, and Muhao Chen. Rethinking tabular data understanding with large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 450-482, Mexico City, Mexico, June 2024d. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.26. URL https://aclanthology.org/2024.naacl-long.26/.", + "Tongxuan Liu, Xingyu Wang, Weizhe Huang, Wenjiang Xu, Yuting Zeng, Lei Jiang, Hailong Yang, and Jing Li. Groupdebate: Enhancing the efficiency of multi-agent debate using group discussion. arXiv preprint arXiv:2409.14051, 2024e.", + "Yanchen Liu, Srishti Gautam, Jiaqi Ma, and Himabindu Lakkaraju. Investigating the fairness of large language models for predictions on tabular data. In *Socially Responsible Language Modelling Research*, 2023. URL https://openreview.net/forum?id=V1740FqidS.", + "Yantao Liu, Zijun Yao, Rui Min, Yixin Cao, Lei Hou, and Juanzi Li. Pairwise rm: Perform best-of-n sampling with knockout tournament. arXiv preprint arXiv:2501.13007, 2025b.", + "Zhihan Liu, Hao Hu, Shenao Zhang, Hongyi Guo, Shuqi Ke, Boyi Liu, and Zhaoran Wang. Reason for future, act for now: A principled architecture for autonomous LLM agents. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 31186-31261. PMLR, 21-27 Jul 2024f.", + "Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be a hah moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025c. Notion Blog.", + "Do Xuan Long, Hai Nguyen Ngoc, Tiviatis Sim, Hieu Dao, Shafiq Joty, Kenji Kawaguchi, Nancy F Chen, and Min-Yen Kan. Llms are biased towards output formats! systematically evaluating and mitigating output format bias of llms. arXiv preprint arXiv:2408.08656, 2024a.", + "Do Xuan Long, Duong Ngoc Yen, Anh Tuan Luu, Kenji Kawaguchi, Min-Yen Kan, and Nancy F. Chen. Multi-expert prompting improves reliability, safety and usefulness of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 
20370-20401, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.1135. URL https://aclanthology.org/2024.emnlp-main.1135/.", + "Do Xuan Long, Yiran Zhao, Hannah Brown, Yuxi Xie, James Zhao, Nancy Chen, Kenji Kawaguchi, Michael Shieh, and Junxian He. Prompt optimization via adversarial in-context learning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7308-7327, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.395. URL https://aclanthology.org/2024.acl-long.395/.", + "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36, 2024a.", + "Weizheng Lu, Jing Zhang, Ju Fan, Zihao Fu, Yueguo Chen, and Xiaoyong Du. Large language model for table processing: A survey. Frontiers of Computer Science, 19(2):192350, 2025.", + "Xinyuan Lu, Liangming Pan, Yubo Ma, Preslav Nakov, and Min-Yen Kan. Tart: An open-source tool-augmented framework for explainable table-based reasoning. arXiv preprint arXiv:2409.11724, 2024b.", + "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023a." 
+ ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. CoRR, abs/2308.09583, 2023b. doi: 10.48550/ARXIV.2308.09583. URL https://doi.org/10.48550/arXiv.2308.09583.", + "Kangyang Luo, Zichen Ding, Zhenmin Weng, Lingfeng Qiao, Meng Zhao, Xiang Li, Di Yin, and Jinlong Shu. Let's be self-generated via step by step: A curriculum learning approach to automated reasoning with large language models. arXiv preprint arXiv:2410.21728, 2024a. URL https://arxiv.org/abs/2410.21728.", + "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. CoRR, abs/2406.06592, 2024b. doi: 10.48550/ARXIV.2406.06592. URL https://doi.org/10.48550/arXiv.2406.06592.", + "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024c.", + "Man Luo, Xin Xu, Zhuyun Dai, Panupong Pasupat, Mehran Kazemi, Chitta Baral, Vaiva Imbrasaite, and Vincent Y Zhao. Dr. icl: Demonstration-retrieved in-context learning. arXiv preprint arXiv:2305.14128, 2023c.", + "Man Luo, Xin Xu, Yue Liu, Panupong Pasupat, and Mehran Kazemi. In-context learning with retrieved demonstrations for language models: A survey. Transactions on Machine Learning Research, 2024d. ISSN 2835-8856. URL https://openreview.net/forum?id=NQPo8ZhQPa. 
Survey Certification.", + "Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi (eds.), Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/.", + "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, and Aixin Sun. Sciagent: Tool-augmented language models for scientific reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 15701-15736. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.880.", + "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, Aixin Sun, Hany Awadalla, et al. Sciagent: Tool-augmented language models for scientific reasoning. arXiv preprint arXiv:2402.11451, 2024b.", + "Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1448-1535, Singapore, December 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.101. 
URL https://aclanthology.org/2023.findings-emnlp.101/.
arXiv preprint arXiv:2407.00215, 2024.", + "Raja Sekhar Reddy Mekala, Yasaman Razeghi, and Sameer Singh. EchoPrompt: Instructing the model to rephrase queries for improved in-context learning. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 399-432, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-short.35. URL https://aclanthology.org/2024.naacl-short.35.", + "Yu Meng, Mengzhou Xia, and Danqi Chen. Simpo: Simple preference optimization with a reference-free reward. arXiv preprint arXiv:2405.14734, 2024.", + "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, 2024. URL https://openreview.net/forum?id=NjNGLPh8Wh.", + "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024.", + "Yifei Ming, Senthil Purushwalkam, Shrey Pandit, Zixuan Ke, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Faitheval: Can your language model stay faithful to context, even if \"the moon is made of marshmallows\". In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=UeVx6L59fg.", + "Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AjXkRZIvjb.", + "Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. 
Cross-task generalization via natural language crowdsourcing instructions. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3470-3487, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.244. URL https://aclanthology.org/2022.acl-long.244/.", + "Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models - a survey. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Lmjgl2n11u." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Nieves Montes, Michael Luck, Nardine Osman, Odinaldo Rodrigues, and Carles Sierra. Combining theory of mind and abductive reasoning in agent-oriented programming. Autonomous Agents and Multi-Agent Systems, 37(2):36, 2023.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025.", + "Md Mahadi Hasan Nahid and Davood Rafiei. NormTab: Improving symbolic reasoning in LLMs through tabular data normalization. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 3569-3585, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.203. URL https://aclanthology.org/2024 findings-emnlp.203/.", + "Allen Newell, John C Shaw, and Herbert A Simon. Report on a general problem solving program. In IFIP congress, volume 256, pp. 64. Pittsburgh, PA, 1959.", + "Allen Newell, Herbert Alexander Simon, et al. 
Human problem solving, volume 104. Prentice-hall Englewood Cliffs, NJ, 1972.", + "Khanh Nguyen, Hal Daumé III, and Jordan Boyd-Graber. Reinforcement learning for bandit neural machine translation with simulated human feedback. arXiv preprint arXiv:1707.07402, 2017.", + "Ansong Ni, Miltiadis Allamanis, Arman Cohan, Yinlin Deng, Kensen Shi, Charles Sutton, and Pengcheng Yin. Next: Teaching large language models to reason about code execution. In ICML, 2024. URL https://openreview.net/forum?id=B1W712hMBi.", + "Tobias Nipkow, Markus Wenzel, and Lawrence C Paulson. Isabelle/HOL: a proof assistant for higher-order logic. 2002.", + "NovaSky Team. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025. Accessed: 2025-01-09.", + "Maxwell Nye, Anders Andreassen, Guy Gur-Ari, Henryk Witold Michalewski, Jacob Austin, David Bieber, David Martin Dohan, Aitor Lewkowycz, Maarten Paul Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2021. https://arxiv.org/abs/2112.00114.", + "Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma, Tom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al. In-context learning and induction heads. arXiv preprint arXiv:2209.11895, 2022.", + "OpenAI. Introducing gpt-4.5. 
https://openai.com/index/introducing-gpt-4-5/, 2025.", + "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason" + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 57 + }, + { + "type": "text", + "text": "Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quñonero Candela, Joe Palermo, Joel Parish, 
Johannes Heidecke, John Hallman, John Rizzo, Jonathan Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan, Thibault Sottiaux, Thomas Degry, Thomas Dimson, Tianhao Zheng, Timur Garipov, Tom Stasi, Trapit Bansal, Trevor Creech, Troy Peterson, Tyna Eloundou, Valerie Qi, Vineet Kosaraju, Vinnie Monaco, Vitchyr Pong, Vlad Fomenko, Weiyi Zheng, Wenda Zhou, Wes McCabe, Wojciech Zaremba, Yann Dubois, Yinghai Lu, Yining Chen, Young Cha, Yu Bai, Yuchen He, Yuchen Zhang, Yunyun Wang, 
Zheng Shao and Zhuohan Li. Openai o1 system card 2024. URL https://arxiv.org/abs/2412.16720.", + "bbox": [ + 129, + 102, + 883, + 450 + ], + "page_idx": 58 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "OpenAI, :, Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, Jerry Tworek, Lorenz Kuhn, Lukasz Kaiser, Mark Chen, Max Schwarzer, Mostafa Rohaninejad, Nat McAleese, o3 contributors, Oleg Mürk, Rhythm Garg, Rui Shu, Szymon Sidor, Vineet Kosaraju, and Wenda Zhou. Competitive programming with large reasoning models, 2025. URL https://arxiv.org/abs/2502.06807.", + "Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022. URL https://arxiv.org/abs/2203.02155.", + "Bo Pan, Jiaying Lu, Ke Wang, Li Zheng, Zhen Wen, Yingchaojie Feng, Minfeng Zhu, and Wei Chen. Agent-coord: Visually exploring coordination strategy for llm-based multi-agent collaboration. arXiv preprint arXiv:2404.11943, 2024a.", + "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.248. URL https://aclanthology.org/2023-findings-emnlp.248/.", + "Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. 
Automatically correcting large language models: Surveying the landscape of diverse automated correction strategies. Transactions of the Association for Computational Linguistics, 12:484-506, 2024b. doi: 10.1162/tacl_a_00660. URL https://aclanthology.org/2024.tacl-1.27/.", + "Bhargavi Paranjape, Julian Michael, Marjan Ghazvininejad, Hannaneh Hajishirzi, and Luke Zettlemoyer. Prompting contrastive explanations for commonsense reasoning tasks. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pp. 4179-4192, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021-findings-acl.366. URL https://aclanthology.org/2021-findings-acl.366/." + ], + "bbox": [ + 114, + 460, + 883, + 925 + ], + "page_idx": 58 + }, + { + "type": "page_number", + "text": "59", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 58 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Remo Pareschi. Abductive reasoning with the gpt-4 language model: Case studies from criminal investigation, medical practice, scientific research. _Sistema intelligenti_, 35(2):435-444, 2023.", + "John Arthur Passmore. Philosophical reasoning. 1961.", + "Pouya Pezeshkpour, Eser Kandogan, Nikita Bhutani, Sajjadur Rahman, Tom Mitchell, and Estevam Hruschka. Reasoning capacity in multi-agent systems: Limitations, challenges and human-centered solutions, 2024. URL https://arxiv.org/abs/2402.01108.", + "Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrs2T16.", + "Mohammadreza Pourreza and Davood Rafiei. DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 
URL https://openreview.net/forum?id=p53QDxSIc5.", + "Ben Prystawski, Michael Li, and Noah D. Goodman. Why think step by step? reasoning emerges from the locality of experience. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/e0af79ad53a336b4c4b4f7e2a68eb609-Abstract-Conference.html.", + "Reid Pryzant, Dan Iter, Jerry Li, Yin Lee, Chenguang Zhu, and Michael Zeng. Automatic prompt optimization with \"gradient descent\" and beam search. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 7957-7968, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.494. URL https://aclanthology.org/2023.emnlp-main.494/.", + "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024.", + "Zhenting Qi, Hongyin Luo, Xuliang Huang, Zhuokai Zhao, Yibo Jiang, Xiangjun Fan, Himabindu Lakkaraju, and James Glass. Quantifying generalization complexity for large language models, 2024. URL https://arxiv.org/abs/2410.01769.", + "Shuofei Qiao, Honghao Gui, Chengfei Lv, Qianghuai Jia, Huajun Chen, and Ningyu Zhang. Making language models better tool learners with execution feedback. arXiv preprint arXiv:2305.13068, 2023a.", + "Shuofei Qiao, Yixin Ou, Ningyu Zhang, Xiang Chen, Yunzhi Yao, Shumin Deng, Chuanqi Tan, Fei Huang, and Huajun Chen. Reasoning with language model prompting: A survey. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 
5368-5393, Toronto, Canada, July 2023b. URL https://aclanthology.org/2023.acl-long.294/.", + "Chengwei Qin, Wenhan Xia, Tan Wang, Fangkai Jiao, Yuchen Hu, Bosheng Ding, Ruirui Chen, and Shafiq Joty. Relevant or random: Can llms truly perform analogical reasoning? ACL-Findings, 2025. URL https://arxiv.org/abs/2404.12728.", + "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982.", + "Xihe Qiu, Haoyu Wang, Xiaoyu Tan, Chao Qu, Yujie Xiong, Yuan Cheng, Yinghui Xu, Wei Chu, and Yuan Qi. Towards collaborative intelligence: Propagating intentions and reasoning for multi-agent coordination with large language models, 2024. URL https://arxiv.org/abs/2407.12532." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 59 + }, + { + "type": "page_number", + "text": "60", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 59 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=DRC9pZwBwR.", + "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. arXiv preprint arXiv:2407.18219, 2024b.", + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D. Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-AAbstract-Conference.html.", + "Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. Explain yourself! leveraging language models for commonsense reasoning. arXiv preprint arXiv:1906.02361, 2019.", + "Shyam Sundhar Ramesh, Yifan Hu, Iason Chaimalas, Viraj Mehta, Pier Giuseppe Sessa, Haitham Bou Ammar, and Ilija Bogunovic. Group robust preference optimization in reward-free rlhf. arXiv preprint arXiv:2405.20304, 2024.", + "Jingqing Ruan, Yali Du, Xuantang Xiong, Dengpeng Xing, Xiyun Li, Linghui Meng, Haifeng Zhang, Jun Wang, and Bo Xu. Gcs: Graph-based coordination strategy for multi-agent reinforcement learning. arXiv preprint arXiv:2201.06257, 2022.", + "Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. In Marine Carpuat, Marie-Catherine de Marneffé, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655-2671, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.191. URL https://aclanthology.org/2022.naacl-main.191/.", + "Stuart Russell and Peter Norvig. Artificial Intelligence: A Modern Approach. Prentice Hall, 3 edition, 2010.", + "Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. 
arXiv preprint arXiv:2412.13091, 2024.", + "Amir Saeidi, Shivanshu Verma, Aswin RRV, and Chitta Baral. Triple preference optimization: Achieving better alignment with less data in a single step optimization. arXiv preprint arXiv:2405.16681, 2024.", + "Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M Rush. Multitask prompted training enables zero-shot task generalization. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=9Vrb9D0WI4.", + "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V.", + "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022.", + "Erik Schluntz and Barry Zhang. Building effective agents. https://www.anthropic.com/, Dec 2024. URL https://www.anthropic.com/research/building-effective-agents." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 60 + }, + { + "type": "page_number", + "text": "61", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 60 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 
Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. Quantifying language models' sensitivity to spurious features in prompt design or: How i learned to start worrying about prompt formatting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RIu51yNXjT.", + "S Seals and Valerie Shalin. Evaluating the deductive competence of large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8614-8630, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.476. URL https://aclanthology.org/2024.naacl-long.476/.", + "H Seo and D Lee. Reinforcement learning and strategic reasoning during social decision-making. In Decision Neuroscience, pp. 225-231. Elsevier, 2017.", + "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shahriari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. BOND: aligning llms with best-of-n distillation. CoRR, abs/2407.14622, 2024. URL https://doi.org/10.48550/arXiv.2407.14622.", + "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning. arXiv preprint arXiv:2410.08146, 2024a.", + "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. 
CoRR, abs/2410.08146, 2024b. doi: 10.48550/ARXIV.2410.08146. URL https://doi.org/10.48550/arXiv.2410.08146.", + "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role play with large language models. Nature, 623 (7987):493-498, 2023a.", + "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role-play with large language models, 2023b. URL https://arxiv.org/abs/2305.16367.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "Zhengliang Shi, Weiwei Sun, Shen Gao, Pengjie Ren, Zhumin Chen, and Zhaochun Ren. Generate-then-ground in retrieval-augmented generation for multi-hop question answering. arXiv preprint arXiv:2406.14891, 2024.", + "Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024.", + "Kumar Shridhar, Koustuv Sinha, Andrew Cohen, Tianlu Wang, Ping Yu, Ramakanth Pasunuru, Mrinmaya Sachan, Jason Weston, and Asli Celikyilmaz. The art of llm refinement: Ask, refine, and trust. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5872-5883, 2024.", + "Chenglei Si, Zhe Gan, Zhengyuan Yang, Shuohang Wang, Jianfeng Wang, Jordan Lee Boyd-Graber, and Lijuan Wang. Prompting GPT-3 to be reliable. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=98p5x51L5af." 
+ ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 61 + }, + { + "type": "page_number", + "text": "62", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 61 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling LLM test-time compute optimally can be more effective than scaling model parameters. CoRR, abs/2408.03314, 2024. doi: 10.48550/ARXIV.2408.03314. URL https://doi.org/10.48550/arXiv.2408.03314.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling test-time compute optimally can be more effective than scaling LLM parameters. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=4FWAwZtd2n.", + "Yifan Song, Weimin Xiong, Xiutian Zhao, Dawei Zhu, Wenhao Wu, Ke Wang, Cheng Li, Wei Peng, and Sujian Li. Agentbank: Towards generalized llm agents via fine-tuning on $50000+$ interaction trajectories. arXiv preprint arXiv:2410.07706, 2024.", + "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024a. URL https://arxiv.org/pdf/2409.12183.", + "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning, 2024b. URL https://arxiv.org/abs/2409.12183.", + "Keith E Stanovich and Richard F West. Individual differences in reasoning: Implications for the rationality debate? Behavioral and Brain Sciences, 23(5):645-665, 2000.", + "Kaya Stechly, Matthew Marquez, and Subbarao Kambhampati. Gpt-4 doesn't know it's wrong: An analysis of iterative prompting for reasoning problems. 
arXiv preprint arXiv:2310.12397, 2023.", + "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. arXiv preprint arXiv:2402.08115, 2024.", + "Nisan Stiannon, Long Ouyang, Jeff Wu, Daniel M. Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul Christiano. Learning to summarize from human feedback. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20, Red Hook, NY, USA, 2020. Curran Associates Inc. ISBN 9781713829546.", + "Benedikt Stroebl, Sayash Kapoor, and Arvind Narayanan. Inference Scaling fLaws: The Limits of LLM Resampling with Imperfect Verifiers. arXiv preprint arXiv:2411.17501, 2024.", + "Vighnesh Subramaniam, Yilun Du, Joshua B Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains. arXiv preprint arXiv:2501.05707, 2025.", + "Yuan Sui, Mengyu Zhou, Mingjie Zhou, Shi Han, and Dongmei Zhang. Table meets llm: Can large language models understand structured table data? a benchmark and empirical study. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, WSDM '24, pp. 645-654, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 9798400703713. doi: 10.1145/3616855.3635752. URL https://doi.org/10.1145/3616855.3635752.", + "Sainbayar Sukhbaatar, Rob Fergus, et al. Learning multiagent communication with backpropagation. Advances in neural information processing systems, 29, 2016.", + "Theodore R. Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas L. Griffiths. Cognitive architectures for language agents, 2024. URL https://arxiv.org/abs/2309.02427.", + "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. 
arXiv preprint arXiv:2307.07697, 2023." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 62 + }, + { + "type": "page_number", + "text": "63", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 62 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jiaxing Sun, Weiquan Huang, Jiang Wu, Chenya Gu, Wei Li, Songyang Zhang, Hang Yan, and Conghui He. Benchmarking Chinese commonsense reasoning of LLMs: From Chinese-specifics to reasoning-memorization correlations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 11205-11228, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.604. URL https://aclanthology.org/2024.acl-long.604/.", + "Shichao Sun, Junlong Li, Weizhe Yuan, Ruifeng Yuan, Wenjie Li, and Pengfei Liu. The critique of critique. arXiv preprint arXiv:2401.04518, 2024b.", + "Zhiqing Sun, Longhui Yu, Yikang Shen, Weiyang Liu, Yiming Yang, Sean Welleck, and Chuang Gan. Easy-to-hard generalization: Scalable alignment beyond human supervision. CoRR, abs/2403.09472, 2024c. doi: 10.48550/ARXIV.2403.09472. URL https://doi.org/10.48550/arXiv.2403.09472.", + "Richard S Sutton. Reinforcement learning: An introduction. A Bradford Book, 2018.", + "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024a.", + "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding, 2024b. URL https://arxiv.org/abs/2401.12954.", + "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024.", + "Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. 
Mathscale: Scaling instruction tuning for mathematical reasoning. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Kjww7ZN47M.", + "Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, and Junyang Lin. Enabling scalable oversight via self-evolving critic, 2025. URL https://arxiv.org/abs/2501.05727.", + "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023.", + "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, Chuning Tang, Congcong Wang, Dehao Zhang, Enming Yuan, Enzhe Lu, Fengxiang Tang, Flood Sung, Guangda Wei, Guokun Lai, Haiqing Guo, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haotian Yao, Haotian Zhao, Haoyu Lu, Haoze Li, Haozhen Yu, Hongcheng Gao, Huabin Zheng, Huan Yuan, Jia Chen, Jianhang Guo, Jianlin Su, Jianzhou Wang, Jie Zhao, Jin Zhang, Jingyuan Liu, Junjie Yan, Junyan Wu, Lidong Shi, Ling Ye, Longhui Yu, Mengnan Dong, Neo Zhang, Ningchen Ma, Qiwei Pan, Qucheng Gong, Shaowei Liu, Shengling Ma, Shupeng Wei, Sihan Cao, Siying Huang, Tao Jiang, Weihao Gao, Weimin Xiong, Weiran He, Weixiao Huang, Wenhao Wu, Wenyang He, Xianghui Wei, Xianqing Jia, Xingzhe Wu, Xinran Xu, Xinxing Zu, Xinyu Zhou, Xuehai Pan, Y. Charles, Yang Li, Yangyang Hu, Yangyang Liu, Yanru Chen, Yejie Wang, Yibo Liu, Yidao Qin, Yifeng Liu, Ying Yang, Yiping Bao, Yulun Du, Yuxin Wu, Yuzhi Wang, Zaida Zhou, Zhaoji Wang, Zhaowei Li, Zhen Zhu, Zheng Zhang, Zhexu Wang, Zhilin Yang, Zhiqi Huang, Ziyao Xu, and Zonghan Yang. Kimi k1.5: Scaling reinforcement learning with llms, 2025. URL https://arxiv.org/abs/2501.12599.", + "Qwen Team. 
Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/.", + "Amitayush Thakur, George Tsoukalas, Yeming Wen, Jimmy Xin, and Swarat Chaudhuri. An in-context learning agent for formal theorem-proving. In Conference on Language Modeling (COLM), 2024." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 63 + }, + { + "type": "page_number", + "text": "64", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 63 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "The Coq Development Team. The Coq Proof Assistant. 2024. URL https://coq.inria.fr/doc/V8.20.0/refman/index.html. Version 8.20.0.", + "Qingyuan Tian, Hanlun Zhu, Lei Wang, Yang Li, and Yunshi Lan. $\\mathbf{R}^3$ prompting: Review, rephrase and resolve for chain-of-thought reasoning in large language models under noisy context. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1670-1685, Singapore, December 2023. Association for Computational Linguistics. doi: 10. 18653/v1/2023-findings-emnlp.114. URL https://aclanthology.org/2023-findings-emnlp.114/.", + "Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. arXiv preprint arXiv:2404.12253, 2024.", + "Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. CoRR, abs/2407.13690, 2024. doi: 10.48550/ARXIV.2407.13690. URL https://doi.org/10.48550/arXiv.2407.13690.", + "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.", + "Vince Trencsenyi, Agnieszka Mensfelt, and Kostas Stathis. 
Approximating human strategic reasoning with llm-enhanced recursive reasoners leveraging multi-agent hypergames. arXiv preprint arXiv:2502.07443, 2025.", + "Trieu H Trinh, Yuhuai Wu, Quoc V Le, He He, and Thang Luong. Solving olympiad geometry without human demonstrations. Nature, 2024.", + "Prapti Trivedi, Aditya Gulati, Oliver Molenschot, Meghana Arakkal Rajeev, Rajkumar Ramamurthy, Keith Stevens, Tanveesh Singh Chaudhery, Jahnavi Jambholkar, James Zou, and Nazneen Rajani. Self-rationalization improves llm as a fine-grained judge. arXiv preprint arXiv:2410.05495, 2024.", + "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process-and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022.", + "Karthik Valmeekam, Matthew Marquez, and Subbarao Kambhampati. Can large language models really improve by self-critiquing their own plans? arXiv preprint arXiv:2310.08118, 2023.", + "Pat Verga, Sebastian Hofstatter, Sophia Althammer, Yixuan Su, Aleksandra Piktus, Arkady Arkhangorodsky, Minjie Xu, Naomi White, and Patrick Lewis. Replacing judges with juries: Evaluating llm generations with a panel of diverse models. arXiv preprint arXiv:2404.18796, 2024.", + "Johannes Von Oswald, Eyvind Niklasson, Ettore Randazzo, Joao Sacramento, Alexander Mordvintsev, Andrey Zhmoginov, and Max Vlademyrov. Transformers learn in-context by gradient descent. In International Conference on Machine Learning, pp. 35151-35174. PMLR, 2023.", + "Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. arXiv preprint arXiv:2407.10817, 2024.", + "Xingchen Wan, Ruoxi Sun, Hootan Nakhost, and Sercan O Arik. Teach better or show smarter? on instructions and exemplars in automatic prompt optimization. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=IdtoJVWvNx.", + "Yuxuan Wan, Wenxuan Wang, Yiliu Yang, Youliang Yuan, Jen-tse Huang, Pinjia He, Wenxiang Jiao, and Michael Lyu. LogicAsker: Evaluating and improving the logical reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 2124-2155, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.128. URL https://aclanthology.org/2024.emnlp-main.128/." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 64 + }, + { + "type": "page_number", + "text": "65", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 64 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In _Forty-first International Conference on Machine Learning_, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024c. URL https://openreview.net/forum?id=C4OpREezgj.", + "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/.", + "Han Wang, Archiki Prasad, Elias Stengel-Eskin, and Mohit Bansal. Soft self-consistency improves language model agents. 
arXiv preprint arXiv:2402.13212, 2024a.", + "Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Yixuan Li, and Neel Joshi. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. In The Thirty-Eighth Annual Conference on Neural Information Processing Systems, 2024b.", + "Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities, 2024c. URL https://arxiv.org/abs/2406.04692.", + "Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Jirong Wen. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6), March 2024d. ISSN 2095-2236. doi: 10.1007/s11704-024-40231-1. URL http://dx.doi.org/10.1007/s11704-024-40231-1.", + "Liang Wang, Nan Yang, and Furu Wei. Learning to retrieve in-context examples for large language models. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1752-1767, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.105/.", + "Peifeng Wang, Zhengyang Wang, Zheng Li, Yifan Gao, Bing Yin, and Xiang Ren. SCOTT: Self-consistent chain-of-thought distillation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5546-5558, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.304. URL https://aclanthology.org/2023.acl-long.304/.", + "Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. 
arXiv preprint arXiv:2409.14664, 2024f.", + "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9426-9439. Association for Computational Linguistics, 2024g. URL https://doi.org/10.18653/v1/2024.acl-long.510.", + "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of llm reasoning: Are multi-agent discussions the key?, 2024h. URL https://arxiv.org/abs/2402.18272.", + "Song Wang, Zihan Chen, Chengshuai Shi, Cong Shen, and Jundong Li. Mixture of demonstrations for in-context learning. Advances in Neural Information Processing Systems, 37:88091-88116, 2024i.", + "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023c." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 65 + }, + { + "type": "page_number", + "text": "66", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 65 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024j.", + "Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. CoRR, abs/2310.05707, 2023d. doi: 10.48550/ARXIV.2310.05707. URL https://doi.org/10.48550/arXiv.2310.05707.", + "Xinyi Wang, Wanrong Zhu, Michael Saxon, Mark Steyvers, and William Yang Wang. 
Large language models are latent variable models: Explaining and finding good demonstrations for in-context learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023e. URL https://openreview.net/forum?id=BGvkwZEGt7.", + "Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. arXiv preprint arXiv:2402.10200, 2024.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023f. URL https://openreview.net/forum?id=1PL1NIMMrw.", + "Yidong Wang, Zhuohao Yu, Wenjin Yao, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, et al. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. In The Twelfth International Conference on Learning Representations, 2023g.", + "Yuqing Wang and Yun Zhao. Rupbench: Benchmarking reasoning under perturbations for robustness evaluation in large language models. arXiv preprint arXiv:2406.11020, 2024.", + "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. In Findings of the Association for Computational Linguistics: EMNLP 2024, Miami, Florida, USA, November 12-16, 2024, pp. 7309-7319. Association for Computational Linguistics, 2024k. URL https://aclanthology.org/2024-findings-emnlp.429.", + "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024l.", + "Zihao Wang, Anji Liu, Haowei Lin, Jiaqi Li, Xiaojian Ma, and Yitao Liang. 
Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313, 2024m.", + "Zilong Wang, Hao Zhang, Chun-Liang Li, Julian Martin Eisenschlos, Vincent Perot, Zifeng Wang, Lesly Miculicich, Yasuhisa Fujii, Jingbo Shang, Chen-Yu Lee, and Tomas Pfister. Chain-of-table: Evolving tables in the reasoning chain for table understanding. In The Twelfth International Conference on Learning Representations, 2024n. URL https://openreview.net/forum?id=4L0xnS4GQM.", + "Peter Cathcart Wason and Philip Nicholas JohnsonLaird. Psychology of reasoning: Structure and content. Harvard University Press, 86, 1972.", + "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682, 2022a.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022b." + ], + "bbox": [ + 114, + 102, + 883, + 924 + ], + "page_idx": 66 + }, + { + "type": "page_number", + "text": "67", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 66 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuxiang Wei, Zhe Wang, Jiawei Liu, Yifeng Ding, and Lingming Zhang. Magicoder: Empowering code generation with OSS-instruct. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 52632-52657. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wei24h.html.", + "Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution, 2025. 
URL https://arxiv.org/abs/2502.18449.", + "Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. arXiv preprint arXiv:2405.16337, 2024.", + "Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. arXiv preprint arXiv:2406.16838, 2024.", + "Ying Wen, Yaodong Yang, Rui Luo, Jun Wang, and Wei Pan. Probabilistic recursive reasoning for multi-agent reinforcement learning. arXiv preprint arXiv:1901.09207, 2019.", + "Lily Weng. Llm-powered autonomous agents. *Github*, 2023. URL https://lilianweng.github.io/posts/2023-06-23-agent/.", + "Martin Weyssow, Aton Kamanda, and Houari A. Sahraoui. Codeultrafeedback: An llm-as-a-judge dataset for aligning large language models to coding preferences. CoRR, abs/2403.09032, 2024.", + "Sarah Wegreffe, Ana Marasovic, and Noah A Smith. Measuring association between labels and free-text rationales. arXiv preprint arXiv:2010.12762, 2020.", + "Sarah Wiegrefe, Jack Hessel, Swabha Swayamdipta, Mark Riedl, and Yejin Choi. Reframing human-AI collaboration for generating free-text explanations. In Marine Carpuat, Marie-Catherine de Marneffe, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 632-658, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.47. URL https://aclanthology.org/2022.naacl-main.47/.", + "Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8:229-256, 1992.", + "Yuhuai Wu, Albert Jiang, Wenda Li, Markus Rabe, Charles Staats, Mateja Jamnik, and Christian Szegedy. Autoformalization with large language models. 
In Neural Information Processing Systems (NeurIPS), 2022.", + "Zhaofeng Wu, Linlu Qiu, Alexis Ross, Ekin Akyurek, Boyuan Chen, Bailin Wang, Najoung Kim, Jacob Andreas, and Yoon Kim. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1819-1862, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.102. URL https://aclanthology.org/2024.naacl-long.102/.", + "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024b.", + "Zijian Wu, Suozhi Huang, Zhejian Zhou, Huaiyuan Ying, Jiayu Wang, Dahua Lin, and Kai Chen. Internl m2. 5-stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024c." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 67 + }, + { + "type": "page_number", + "text": "68", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 67 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, Rui Zheng, Xiaoran Fan, Xiao Wang, Limao Xiong, Yuhao Zhou, Weiran Wang, Changhao Jiang, Yicheng Zou, Xiangyang Liu, Zhangyue Yin, Shihan Dou, Rongxiang Weng, Wensen Cheng, Qi Zhang, Wenjuan Qin, Yongyan Zheng, Xipeng Qiu, Xuanjing Huang, and Tao Gui. The rise and potential of large language model based agents: A survey. 
arXiv preprint arXiv:2309.07864, 2023.", + "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, Xiao Wang, Rui Zheng, Tao Ji, Xiaowei Shi, Yitao Zhai, Rongxiang Weng, Jingang Wang, Xunliang Cai, Tao Gui, Zuxuan Wu, Qi Zhang, Xipeng Qiu, Xuanjing Huang, and YuGang Jiang. Enhancing llm reasoning via critique models with test-time and training-time supervision, 2024. URL https://arxiv.org/abs/2411.16579.", + "Sang Michael Xie, Aditi Raghunathan, Percy Liang, and Tengyu Ma. An explanation of in-context learning as implicit bayesian inference. In International Conference on Learning Representations, 2022.", + "Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025.", + "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. CoRR, abs/2405.14333, 2024a. doi: 10.48550/ARXIV.2405.14333. URL https://doi.org/10.48550/arXiv.2405.14333.", + "Huajian Xin, ZZ Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024b. URL https://arxiv.org/abs/2408.08152.", + "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613.", + "Austin Xu, Srijan Bansal, Yifei Ming, Semih Yavuz, and Shafiq Joty. Does context matter? contextual judgebench for evaluating llm-based judges in contextual settings. arXiv preprint arXiv:2503.15620, 2025a.", + "Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. 
J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025b.", + "Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, Qingwei Lin, and Daxin Jiang. Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a.", + "Fangzhi Xu, Qika Lin, Jiawei Han, Tianzhe Zhao, Jun Liu, and Erik Cambria. Are large language models really good logical reasoners? a comprehensive evaluation and beyond. IEEE Transactions on Knowledge and Data Engineering, 2025c.", + "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025d.", + "Hanwei Xu, Yujun Chen, Yulun Du, Nan Shao, Wang Yanggang, Haiyu Li, and Zhilin Yang. GPS: Genetic prompt search for efficient few-shot learning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 8162-8171, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.559. URL https://aclanthology.org/2022.emnlp-main.559/." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 68 + }, + { + "type": "page_number", + "text": "69", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 68 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? 
arXiv preprint arXiv:2501.11284, 2025e.", + "Kehan Xu, Kun Zhang, Jingyuan Li, Wei Huang, and Yuanzhuo Wang. Crp-rag: A retrieval-augmented generation framework for supporting complex logical reasoning and knowledge planning. _Electronics_, 14 (1):47, 2024b.", + "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models. arXiv preprint arXiv:2402.13116, 2024c.", + "Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. SoftCoT: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025f.", + "Zhiwei Xu, Yunpeng Bai, Bin Zhang, Dapeng Li, and Guoliang Fan. Haven: Hierarchical cooperative multiagent reinforcement learning with dual coordination mechanism. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11735-11743, 2023.", + "Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S3c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024.", + "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024a.", + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b.", + "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024c. URL https://openreview.net/forum?id=Bb4VGOWELI.", + "Jinghan Yang, Shuming Ma, and Furu Wei. Auto-icl: In-context learning without human supervision. 
arXiv preprint arXiv:2311.09263, 2023a. URL https://arxiv.org/abs/2311.09263.", + "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan Prenger, and Anima Anandkumar. LeanDojo: Theorem proving with retrieval-augmented language models. In Neural Information Processing Systems (NeurIPS), 2023b.", + "Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024d.", + "Ruihan Yang, Jiangjie Chen, Yikai Zhang, Siyu Yuan, Aili Chen, Kyle Richardson, Yanghua Xiao, and Deqing Yang. Selfgoal: Your language agents already know how to achieve high-level goals. arXiv preprint arXiv:2406.04784, 2024e.", + "Zonglin Yang, Li Dong, Xinya Du, Hao Cheng, Erik Cambria, Xiaodong Liu, Jianfeng Gao, and Furu Wei. Language models as inductive reasoners. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 209-225, St. Julian's, Malta, March 2024f. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.13/.", + "Shunyu Yao and Karthik Narasimhan. Language agents in the digital world: Opportunities and risks. _princeton-nlp.github.io_, Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023a. URL https://openreview.net/forum?id=5Xc1ecx01h." 
+ ], + "bbox": [ + 112, + 102, + 883, + 922 + ], + "page_idx": 69 + }, + { + "type": "page_number", + "text": "70", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 69 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. Re-Act: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023b.", + "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, et al. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv preprint arXiv:2308.02151, 2023c.", + "Michihiro Yasunaga, Xinyun Chen, Yujia Li, Panupong Pasupat, Jure Leskovec, Percy Liang, Ed H. Chi, and Denny Zhou. Large language models as analogical reasoners. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AgDICX1h50.", + "He Ye, Matias Martinez, Xiapu Luo, Tao Zhang, and Martin Monperrus. Selfapr: Self-supervised program repair with test execution diagnostics. In Proceedings of the 37th IEEE/ACM International Conference on Automated Software Engineering, pp. 1-13, 2022. URL https://arxiv.org/abs/2203.12755.", + "Jiacheng Ye, Zhiyong Wu, Jiangtao Feng, Tao Yu, and Lingpeng Kong. Compositional exemplars for in-context learning. In International Conference on Machine Learning, pp. 39818-39833. PMLR, 2023a.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025.", + "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. 
In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023b. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708.", + "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023c. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708.", + "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023d. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708.", + "Ziyi Ye, Xiangsheng Li, Qiuchi Li, Qingyao Ai, Yujia Zhou, Wei Shen, Dong Yan, and Yiqun Liu. Beyond scalar reward model: Learning generative judge from preference data. arXiv preprint arXiv:2410.03742, 2024.", + "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373, 2025.", + "Shuo Yin, Weihao You, Zhilong Ji, Guoqiang Zhong, and Jinfeng Bai. Mumath-code: Combining tool-use large language models with multi-perspective data augmentation for mathematical reasoning. arXiv preprint arXiv:2405.07551, 2024.", + "Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. 
ACM Computing Surveys, 56(12):1-39, 2024a.", + "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023a." + ], + "bbox": [ + 112, + 102, + 883, + 926 + ], + "page_idx": 70 + }, + { + "type": "page_number", + "text": "71", + "bbox": [ + 488, + 948, + 506, + 959 + ], + "page_idx": 70 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Longhui Yu, Weisen Jiang, Han Shi, YU Jincheng, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. MetaMath: Bootstrap your own mathematical questions for large language models. In International Conference on Learning Representations (ICLR), 2024b.", + "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024c.", + "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025. URL https://arxiv.org/abs/2503.14476.", + "Zhouliang Yu, Jie Fu, Yao Mu, Chenguang Wang, Lin Shao, and Yaodong Yang. Multireact: Multimodal tools augmented reasoning-acting traces for embodied agent planning. 
2023b.", + "Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Wei Ye, Jindong Wang, Xing Xie, Yue Zhang, and Shikun Zhang. Kieval: A knowledge-grounded interactive evaluation framework for large language models. arXiv preprint arXiv:2402.15043, 2024d.", + "Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Jia Deng, Boji Shan, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. CoRR, abs/2404.02078, 2024a. doi: 10.48550/ARXIV.2404.02078. URL https://doi.org/10.48550/arXiv.2404.02078.", + "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels, 2024b. URL https://arxiv.org/abs/2412.01981.", + "Siyu Yuan, Kaitao Song, Jiangjie Chen, Xu Tan, Dongsheng Li, and Deqing Yang. Evoagent: Towards automatic multi-agent generation via evolutionary algorithms. arXiv preprint arXiv:2406.14228, 2024c.", + "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. arXiv preprint arXiv:2401.10020, 2024d.", + "Zheng Yuan, Hongyi Yuan, Chengpeng Li, Guanting Dong, Chuanqi Tan, and Chang Zhou. Scaling relationship on learning mathematical reasoning with large language models. CoRR, abs/2308.01825, 2023. doi: 10.48550/ARXIV.2308.01825. URL https://doi.org/10.48550/arXiv.2308.01825.", + "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: Learning to reason dynamically in LLMs via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tn2mjzjSyR.", + "Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. 
arXiv preprint arXiv:2309.05653, 2023.", + "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. STar: Bootstrapping reasoning with reasoning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=3ELRdg2sgI.", + "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024.", + "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024a." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 71 + }, + { + "type": "page_number", + "text": "72", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 71 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. CoRR, abs/2410.02884, 2024b. URL https://doi.org/10.48550/arXiv.2410.02884.", + "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, Bingnan Zheng, Bang Liu, Yuyu Luo, and Chenglin Wu. Aflow: Automating agentic workflow generation, 2024c. URL https://arxiv.org/abs/2410.10762.", + "Jun Zhang, Trey Hedden, and Adrian Chia. Perspective-taking and depth of theory-of-mind reasoning in sequential-move games. Cognitive science, 36(3):560-573, 2012.", + "Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. 
arXiv preprint arXiv:2410.22480, 2024d.", + "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024e. URL https://openreview.net/forum?id=CxHRoTLmPX.", + "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024f.", + "Qizhen Zhang, Chris Lu, Animesh Garg, and Jakob Foerster. Centralized model and exploration policy for multi-agent rl. arXiv preprint arXiv:2107.06434, 2021.", + "Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. Finagent: A multimodal foundation agent for financial trading: Tool-augmented, diversified, and generalist. arXiv preprint arXiv:2402.18485, 2024g.", + "Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. CoRR, abs/2406.09136, 2024h. doi: 10.48550/ARXIV.2406.09136. URL https://doi.org/10.48550/arXiv.2406.09136.", + "Xuanliang Zhang, Dingzirui Wang, Longxu Dou, Qingfu Zhu, and Wanxiang Che. A survey of table reasoning with large language models. Frontiers of Computer Science, 19(9):199348, 2025a.", + "Yufeng Zhang, Fengzhuo Zhang, Zhuoran Yang, and Zhaoran Wang. What and how does in-context learning learn? bayesian model averaging, parameterization, and generalization. arXiv preprint arXiv:2305.19420, 2023.", + "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 15637-15653, Bangkok, Thailand, August 2024i. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.924. URL https://aclanthology.org/2024.findings-acl.924/.", + "Zhehao Zhang, Yan Gao, and Jian-Guang Lou. $e^5$ : Zero-shot hierarchical table analysis using augmented LLMs via explain, extract, execute, exhibit and extrapolate. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1244-1258, Mexico City, Mexico, June 2024j. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.68. URL https://aclanthology.org/2024.naacl-long.68/.", + "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025b.", + "Ruochen Zhao, Xingxuan Li, Shafiq Joty, Chengwei Qin, and Lidong Bing. Verify-and-edit: A knowledge-enhanced chain-of-thought framework. arXiv preprint arXiv:2305.03268, 2023." + ], + "bbox": [ + 112, + 102, + 883, + 925 + ], + "page_idx": 72 + }, + { + "type": "page_number", + "text": "73", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 72 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. CoRR, abs/2412.06559, 2024. URL https://arxiv.org/abs/2412.06559.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llm-as-a-judge with mt-bench and chatbot arena. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023a. URL http://papers.nips.cc/paper_files/paper/2023/hash/91f18a1287b398d378ef22505bf41832-Abstract-Datasets_and_Benchmarks.html.", + "Rui Zheng, Shihan Dou, Songyang Gao, Yuan Hua, Wei Shen, Binghai Wang, Yan Liu, Senjie Jin, Qin Liu, Yuhao Zhou, et al. Secrets of rlhf in large language models part i: Ppo. arXiv preprint arXiv:2307.04964, 2023b.", + "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 62138-62160. PMLR, 21-27 Jul 2024a.", + "Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgfM.", + "Han Zhou, Xingchen Wan, Ruoxi Sun, Hamid Palangi, Shariq Iqbal, Ivan Vulic, Anna Korhonen, and Sercan Ö. Ark. Multi-agent design: Optimizing agents with better prompts and topologies, 2025a. URL https://arxiv.org/abs/2502.02533.", + "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=BR0vXhmzYK.", + "Yilun Zhou, Austin Xu, Peifeng Wang, Caiming Xiong, and Shafiq Joty. 
Evaluating judges as evaluators: The jetts benchmark of llm-as-judges as test-time scaling evaluators. arXiv preprint arXiv:2504.15253, 2025b.", + "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large language models are human-level prompt engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-.", + "Yuxiang Zhou, Jiazheng Li, Yanzheng Xiang, Hanqi Yan, Lin Gui, and Yulan He. The mystery of in-context learning: A comprehensive survey on interpretation and analysis. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 14365-14378, 2024c.", + "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024a.", + "Ying Zhu, Shengchang Li, Ziqian Kong, and Peilan Xu. Graph retrieval augmented trustworthiness reasoning. arXiv preprint arXiv:2408.12333, 2024b.", + "Mingchen Zhuge, Wenyi Wang, Louis Kirsch, Francesco Faccio, Dmitrii Khizbullin, and Jürgen Schmidhuber. Language agents as optimizable graphs, 2024. URL https://arxiv.org/abs/2402.16823." + ], + "bbox": [ + 112, + 102, + 883, + 924 + ], + "page_idx": 73 + }, + { + "type": "page_number", + "text": "74", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 73 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jingming Zhuo, Songyang Zhang, Xinyu Fang, Haodong Duan, Dahua Lin, and Kai Chen. ProSA: Assessing and understanding the prompt sensitivity of LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 1950-1976, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.108. 
URL https://aclanthology.org/2024 findings-emnlp.108/.", + "Daniel M. Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B. Brown, Alec Radford, Dario Amodei, Paul F. Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. CoRR, abs/1909.08593, 2019. URL http://arxiv.org/abs/1909.08593.", + "Kaijian Zou, Muhammad Khalifa, and Lu Wang. Retrieval or global context understanding? on many-shot in-context learning for long-context evaluation. arXiv preprint arXiv:2411.07130, 2024." + ], + "bbox": [ + 114, + 102, + 883, + 275 + ], + "page_idx": 74 + }, + { + "type": "page_number", + "text": "75", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 74 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_model.json b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_model.json new file mode 100644 index 0000000000000000000000000000000000000000..57d501970daeb1e5c3f5c8073d44a5c86a989329 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_model.json @@ -0,0 +1,10580 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.061, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.09037v3 [cs.AI] 5 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.099, + 0.882, + 0.152 + ], + "angle": 0, + "content": "A Survey of Frontiers in LLM Reasoning: Inference Scaling, Learning to Reason, and Agentic Systems" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.183, + 0.212, + 0.198 + ], + "angle": 0, + "content": "Zixuan Ke\\*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.204, + 0.238, + 0.22 + ], + "angle": 0, + "content": "Fangkai Jiao" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.226, + 0.212, + 0.242 + ], + "angle": 0, + "content": "Yifei Ming*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.248, + 0.272, + 0.263 + ], + "angle": 0, + "content": "Xuan-Phi Nguyen*" + }, + { + "type": "text", + 
"bbox": [ + 0.116, + 0.269, + 0.21, + 0.282 + ], + "angle": 0, + "content": "Austin Xu\\*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.289, + 0.255, + 0.305 + ], + "angle": 0, + "content": "Do Xuan Long†,‡" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.311, + 0.212, + 0.325 + ], + "angle": 0, + "content": "Minzhi Li† ‡" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.333, + 0.24, + 0.348 + ], + "angle": 0, + "content": "Chengwei Qin" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.355, + 0.238, + 0.37 + ], + "angle": 0, + "content": "Peifeng Wang*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.376, + 0.246, + 0.389 + ], + "angle": 0, + "content": "Silvio Savarese*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.397, + 0.247, + 0.412 + ], + "angle": 0, + "content": "Caiming Xiong*" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.419, + 0.228, + 0.434 + ], + "angle": 0, + "content": "Shafiq Joty\\*," + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.446, + 0.276, + 0.46 + ], + "angle": 0, + "content": "\\*Salesforce AI Research" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.461, + 0.349, + 0.474 + ], + "angle": 0, + "content": "Nanyang Technological University" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.446, + 0.349, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.379, + 0.446, + 0.605, + 0.461 + ], + "angle": 0, + "content": "† National University of Singapore" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.461, + 0.558, + 0.474 + ], + "angle": 0, + "content": "\\(^\\ddagger I^2 R\\), \\(A^{*}STAR\\), Singapore" + }, + { + "type": "list", + "bbox": [ + 0.379, + 0.446, + 0.605, + 0.474 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.716, + 0.185, + 0.884, + 0.198 + ], + "angle": 0, + "content": "zixuan ke@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.716, + 0.206, + 0.884, + 0.22 + ], + "angle": 0, + "content": 
"jiaofangkai@hotmail.com" + }, + { + "type": "text", + "bbox": [ + 0.712, + 0.228, + 0.883, + 0.242 + ], + "angle": 0, + "content": "yifei.ming@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.724, + 0.25, + 0.883, + 0.262 + ], + "angle": 0, + "content": "xnguyen@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.718, + 0.271, + 0.883, + 0.284 + ], + "angle": 0, + "content": "austin.xu@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.73, + 0.292, + 0.883, + 0.305 + ], + "angle": 0, + "content": "xuanlong.do@u.nus.edu" + }, + { + "type": "text", + "bbox": [ + 0.751, + 0.313, + 0.883, + 0.325 + ], + "angle": 0, + "content": "li.minzhi@u.nus.edu" + }, + { + "type": "text", + "bbox": [ + 0.711, + 0.335, + 0.883, + 0.348 + ], + "angle": 0, + "content": "chengwei003@e.ntu.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.694, + 0.356, + 0.883, + 0.37 + ], + "angle": 0, + "content": "peifeng.wang@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.717, + 0.378, + 0.883, + 0.39 + ], + "angle": 0, + "content": "ssavarese@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.736, + 0.399, + 0.883, + 0.412 + ], + "angle": 0, + "content": "cxiong@salesforce.com" + }, + { + "type": "text", + "bbox": [ + 0.746, + 0.42, + 0.883, + 0.433 + ], + "angle": 0, + "content": "sjoty@salesforce.com" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.536, + 0.54, + 0.552 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.578, + 0.827, + 0.911 + ], + "angle": 0, + "content": "Reasoning is a fundamental cognitive process that enables logical inference, problem-solving, and decision-making. With the rapid advancement of large language models (LLMs), reasoning has emerged as a key capability that distinguishes advanced AI systems from conventional models that empower chatbots. 
In this survey, we categorize existing methods along two orthogonal dimensions: (1) Regimes, which define the stage at which reasoning is achieved (either at inference time or through dedicated training); and (2) Architectures, which determine the components involved in the reasoning process, distinguishing between standalone LLMs and agentic compound systems that incorporate external tools, and multiagent collaborations. Within each dimension, we analyze two key perspectives: (1) Input level, which focuses on techniques that construct high-quality prompts that the LLM condition on; and (2) Output level, which methods that refine multiple sampled candidates to enhance reasoning quality. This categorization provides a systematic understanding of the evolving landscape of LLM reasoning, highlighting emerging trends such as the shift from inference-scaling to learning-to-reason (e.g., DeepSeek-R1), and the transition to agentic workflows (e.g., OpenAI Deep Research, Manus Agent). Additionally, we cover a broad spectrum of learning algorithms, from supervised fine-tuning to reinforcement learning such as PPO and GRPO, and the training of reasoners and verifiers. We also examine key designs of agentic workflows, from established patterns like generator-evaluator and LLM debate to recent innovations. Finally, we identify emerging trends, such as domain-specific reasoning systems, and open challenges, such as evaluation and data quality. This survey aims to provide AI researchers and practitioners with a comprehensive foundation for advancing reasoning in LLMs, paving the way for more sophisticated and reliable AI systems." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.197, + 0.104, + 0.516, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.104, + 0.796, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.342, + 0.884, + 0.419 + ], + "angle": 0, + "content": "Figure 1: The LLM reasoning surge. We show the cumulative number (in thousands) of papers published from 2022 to 2/2025, based on Semantic Scholar keyword search. Research on reasoning regimes and agent architectures has accelerated notably since the introduction of Chain-of-Thought (CoT) in 2022. This growth is further influenced by other major developments, such as the release of ChatGPT (Ouyang et al., 2022) in 9/2022, and popularity of in-context learning (Brown et al., 2020) as an inference-time optimization method." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.45, + 0.262, + 0.466 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.487, + 0.884, + 0.579 + ], + "angle": 0, + "content": "Reasoning is the cognitive process of analyzing evidence, constructing arguments, and applying logic to form conclusions or make informed judgments. It is essential to many intellectual pursuits, including decision-making, problem-solving, and critical thinking. The study of reasoning spans multiple disciplines—philosophy (Passmore, 1961), psychology (Wason & JohnsonLaird, 1972), and computer science (Huth & Ryan, 2004)—as it provides insights into how individuals interpret information, evaluate alternatives, and develop sound conclusions using logic." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.585, + 0.884, + 0.751 + ], + "angle": 0, + "content": "Recently, large language models (LLMs) have demonstrated a range of emerging abilities, such as in-context learning (Dong et al., 2024), role playing (Shanahan et al., 2023b) and domain adaptation (Ke et al., 2023; 2025a; Ke & Liu, 2023) as they scale, with reasoning becoming one of the most critical capabilities. As shown in Figure 1, this area has rapidly gained research attention, often referred to as LLM reasoning or reasoning language model (RLM) (Besta et al., 2025). The increasing focus on this topic is understandable, as reasoning capability is: (i) Challenging, requiring multi-step processing beyond the token-by-token generative nature of auto-regressive LLMs; (ii) Fundamental, as it is a core aspect of intelligence, particularly in planning and strategic decision-making; and, most importantly, (iii) Promising, as recent advances in LLMs hint at a viable path forward. Given these factors, reasoning is widely regarded as a prerequisite for more advanced AI systems approaching Artificial General Intelligence (AGI), beyond the conventional AI that aims to closely follow instruction (Duenas & Ruiz, 2024)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.759, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Reasoning requires LLMs to go beyond directly producing an answer from a question; instead, they must generate the thinking process (implicitly or explicitly) in the form of 'question \\(\\rightarrow\\) reasoning steps \\(\\rightarrow\\) answer'. It has been shown that scaling pre-training may not be the optimal solution for improving reasoning (Snell et al., 2025; OpenAI, 2025). 
Instead, one popular approach to achieve this is the well-known chain-of-thought (CoT) prompting (Wei et al., 2022b), which demonstrates that by modifying the prompt (e.g., 'Let us think step by step') or in-context samples, LLMs can elicit a step-by-step reasoning process at test time without additional training. Such intuitive prompting techniques have been shown to substantially improve LLMs' reasoning accuracy (Wei et al., 2022b). Building on this, the ability of LLMs to reason effectively depends on two factors: how and at what stage reasoning is achieved, and what components are involved in the reasoning process. Accordingly, in this survey, we categorize existing research into two orthogonal dimensions: (1) Regime, refers to whether reasoning is achieved through inference-time strategies (aka. inference-time" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.135 + ], + "angle": 0, + "content": "scaling) or through direct learning and adaptation (learning to reason); and (2) Architecture, refers to whether reasoning happens within a single, standalone LLM or within an interactive, agentic system." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.142, + 0.885, + 0.264 + ], + "angle": 0, + "content": "These two dimensions are orthogonal, meaning different regimes can be applied to the same architecture, and different architectures can operate under the same regime. The intersection of these dimensions allows for a more comprehensive and systematic organization of reasoning techniques, encompassing most approaches studied to date while highlighting key trends, such as the shift from inference scaling to learning-to-reason and from standalone LLMs to agentic systems. 
Notably, most prior surveys have focused on only one or two of these dimensions, typically inference scaling and standalone LLMs, rarely considering both together (see detailed comparison later). By introducing this categorization, we aim to provide a structured perspective that clarifies the diverse landscape of LLM reasoning and establishes a foundation for future research." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.28, + 0.306, + 0.297 + ], + "angle": 0, + "content": "1.1 Reasoning Regimes" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.308, + 0.884, + 0.4 + ], + "angle": 0, + "content": "Inference scaling CoT prompting demonstrates the potential to scale inference-time (test-time) reasoning. It has also been shown that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it improves generalization through enhanced flexibility in prompt and workflow design. Building on this, inference scaling techniques have emerged, allowing additional test-time computation before generating an answer. The key idea is that instead of updating the LLM itself, these methods aim to select the best trajectories to improve reasoning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.406, + 0.884, + 0.513 + ], + "angle": 0, + "content": "Several variants of prompting methods (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022) have been introduced, providing structured prompts to enhance reasoning. Additionally, inference scaling optimizes reasoning through search and planning (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023; Suzgun & Kalai, 2024a). One key challenge in search and planning is evaluating the quality of candidate solutions. However, evaluating reasoning quality is inherently difficult, even for humans. 
Existing approaches can be categorized based on whether they judge the final outcome, i.e., outcome reward models (ORMs) (Hendrycks et al., 2021b), or the reasoning process, i.e., process reward models (PRMs) (Lightman et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.519, + 0.884, + 0.565 + ], + "angle": 0, + "content": "One of the most notable milestones in this direction is OpenAI's o1 (09/2024) (OpenAI et al., 2024), which demonstrate the effectiveness of inference-time scaling in complex tasks like mathematics, coding and scientific problem-solving:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.828, + 0.645 + ], + "angle": 0, + "content": "\"We have found that the performance of o1 consistently improves with more reinforcement learning (train-time compute) and with more time spent thinking (test-time compute). The constraints on scaling this approach differ substantially from those of LLM pretraining, and we are continuing to investigate them.\" — OpenAI o1 release blog" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.664, + 0.885, + 0.862 + ], + "angle": 0, + "content": "Learning-to-reason Another approach to unleash the deliberate thinking is updating the LLM through training. Unlike inference scaling, learning-to-reason aims to enhance reasoning capabilities through dedicated training, reducing reliance on costly inference-time computations. However, a key challenge in this regime is the scarcity of training data, as step-by-step human-annotated reasoning trajectories are prohibitively expensive to collect. To address this, research has focused on automatically generating such trajectories and developing effective training strategies to leverage them. For example, supervised fine-tuning with long CoT (Muennighoff et al., 2025) or preference learning with reasoning preference data, with DPO (Rafailov et al., 2023) as a representative approach. 
More recent approaches even bypass reasoning annotation by using reinforcement learning (RL), with recent work like GRPO (Shao et al., 2024) demonstrating remarkable success in this direction. A significant milestone in this direction is DeepSeek-R1 (01/2025) (DeepSeek-AI et al., 2025), an open-source model that achieves performance comparable to OpenAI's o1 while requiring far fewer computational resources. It further reveals that RL alone is possible to learn the sophisticated behaviors just as the test-time computation increase:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.88, + 0.828, + 0.927 + ], + "angle": 0, + "content": "\"One of the most remarkable aspects of this self-evolution is the emergence of sophisticated behaviors as the test-time computation increases. Behaviors such as reflection—where the model revisits and reevaluates its previous steps—and the exploration of alternative ap-" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.1, + 0.885, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.118, + 0.294, + 0.878, + 0.31 + ], + "angle": 0, + "content": "Figure 2: The proposed categorization over regimes, architectures, and unified perspectives in this survey." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.344, + 0.828, + 0.39 + ], + "angle": 0, + "content": "proaches to problem-solving arise spontaneously. 
These behaviors are not explicitly programmed but instead emerge as a result of the model's interaction with the reinforcement learning environment.\" — DeepSeek-R1 'Aha moment'" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.419, + 0.395, + 0.433 + ], + "angle": 0, + "content": "1.2 Reasoning System Architecture" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.449, + 0.884, + 0.526 + ], + "angle": 0, + "content": "Standalone LLM and agentic systems Orthogonal to the regimes, studies have explored architectural advancements in LLM reasoning, moving beyond next-token prediction in standalone models to embrace agentic systems—AI systems that exhibit interactivity and autonomy to refine reasoning and decision-making. These systems go beyond the challenges of inference scaling or learning to reason; they introduce system-level complexities, such as designing workflows and coordinating potentially conflicting actions." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.549, + 0.884, + 0.716 + ], + "angle": 0, + "content": "Single-Agent and multi-agent systems To distinguish agentic systems from standalone LLMs, we adopt the perspective of Kapoor et al. (2024), framing agentic behavior as a spectrum. We categorize these systems into two families: single-agent and multi-agent. In single-agent systems, a single LLM interacts with tools in its environment to refine reasoning, actions, and perceptions. These tools include external knowledge bases (Ke et al., 2024; Hammane et al., 2024; Sun et al., 2023), verifiers (Wan et al., 2024c; Guan et al., 2025), and practical applications like code interpreters, calendars, and maps (Yu et al., 2023b; Lu et al., 2024a). By leveraging these resources, the LLM iteratively enhances its decision-making and problem-solving capabilities. 
Recent milestones in single-agent systems, such as Grok 3 Deep Search (02/2025) and OpenAI Deep Research (02/2025), demonstrate how agents interact with the web to significantly improve reasoning, perform tasks like information retrieval, use code interpreters for calculations, and aggregate data from multiple sources." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.745, + 0.828, + 0.82 + ], + "angle": 0, + "content": "\"Deep research independently discovers, reasons about, and consolidates insights from across the web. To accomplish this, it was trained on real-world tasks requiring browser and Python tool use ... While o1 demonstrates impressive capabilities in coding, math, and other technical domains, many real-world challenges demand extensive context and information gathering from diverse online sources.\" — OpenAI deep research release blog" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.85, + 0.885, + 0.926 + ], + "angle": 0, + "content": "The second family, multi-agent systems, goes beyond agent-environment interactions by enabling agent-agent communication. Each agent takes on a distinct role and exchanges messages with others. Key challenges include designing effective communication protocols—whether collaborative (Chen et al., 2023c) or adversarial (Liang et al., 2023b)—and coordinating actions to reach consensus on the final action for the environment. A recent example of this potential is Manus, a popular product showcasing the power of multi-agent systems." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.104, + 0.31, + 0.119 + ], + "angle": 0, + "content": "1.3 Unified Perspectives" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.131, + 0.884, + 0.298 + ], + "angle": 0, + "content": "Although inference scaling and learning-to-reason take different approaches to improving reasoning, they are inherently connected. Inference scaling focuses on selecting the best reasoning trajectories, while learning-to-reason leverages both good and bad trajectories as training data. To unify these approaches, we categorize reasoning trajectory collection techniques in both regimes based on two key perspectives: input and output. At the input level, techniques modify or augment prompts to guide the LLM toward desirable reasoning paths. At the output level, the LLM generates multiple candidate responses, which are then evaluated, ranked, or refined. This framework highlights that many inference scaling techniques—such as prompt modification or trajectory search—can be repurposed for trajectory collection in learning-to-reason (as described in Section 3 and Section 5). Moreover, this connection shows that the two approaches are complementary: inference scaling methods can be applied to models trained under learning-to-reason, motivating the development of inference-aware learning-to-reason methods (Section 5.4)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.305, + 0.884, + 0.398 + ], + "angle": 0, + "content": "These aspects are also effective across different architectures. Similar to standalone LLMs, we categorize techniques based on input and output perspectives. However, to align with agentic system conventions, we use perception as input (to an agent) and action as output (of an agent) in single-agent systems. 
For multi-agent systems, we consider communication as input (to a participating agent) and coordination as output (of the system). This analogy provides a unified perspective across regimes and architectures, offering a systematic and generalizable framework for analyzing LLM reasoning (see Figure 2)." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.414, + 0.409, + 0.43 + ], + "angle": 0, + "content": "1.4 Goal and Structure of the Survey" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.441, + 0.884, + 0.534 + ], + "angle": 0, + "content": "The goal of this survey is to provide a comprehensive overview of key algorithmic details and major milestones in LLM reasoning research, particularly since the emergence of Chain-of-Thought (CoT), across both regime and architecture dimensions. We believe this is a timely and valuable contribution to the community, given the clear acceleration in research following CoT's introduction in 2022 (Figure 1). The rapid growth in studies exploring all aspects of LLM reasoning—from regimes and architectures to training algorithms—highlights the increasing importance and utility of reasoning capabilities in advancing the field." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.54, + 0.882, + 0.678 + ], + "angle": 0, + "content": "Figure 2 provides an overview of the categorization in this survey, organized along two orthogonal dimensions. Within each architecture, there are two key perspectives to consider. The first perspective is input, or perception, or communication. This concerns how to construct a better prompt, refine the given observations from the environment, or establish protocols for exchanging messages with other agents. The second is output—encompassing action or coordination—which involves aggregating outputs, enhancing actions, or coordinating actions to produce a final result. While the figure illustrates high-level categorizations, the following sections delve into more specific terms. 
For example, 'input' is discussed in terms of constructing prompts (see e.g., Sections 3.1.1 and 5.1.1), while 'output' relates to optimizing output and collecting high-quality trajectories (e.g., Sections 3.1.2 and 5.1.2)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.683, + 0.884, + 0.775 + ], + "angle": 0, + "content": "Figure 3 outlines the structure of this survey. We start with a brief introduction to the background, covering key terminologies, components, regimes, and architectures (Section 2). The subsequent sections explore inference scaling (Section 3), learning algorithms for reasoners and verifiers (Section 4), and learning to reason (Section 5). Within the discussions on inference scaling and learning to reason, we examine three key architectures: Standalone LLMs, Single-Agent systems, and Multi-Agent systems. Finally, Section 6 summarizes key insights and discusses open challenges and future directions." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.792, + 0.394, + 0.807 + ], + "angle": 0, + "content": "1.5 Comparison to Related Surveys" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.819, + 0.882, + 0.926 + ], + "angle": 0, + "content": "Reasoning in LLMs has long been a fundamental challenge in the field. Earlier works, such as Huang & Chang (2023), provide a comprehensive overview of the evolution of informal deductive reasoning covering developments prior to the emergence of LLM agents and Reasoning Language Models (RLMs). Our work extends this discussion by focusing on LLM agents and RLMs. Qiao et al. (2023b) offer a detailed summary of advancements in LLM reasoning, with a particular emphasis on prompting techniques. In contrast, we offer a broader range of regimes (from inference to training) and architectures (from standalone LLM to multi-agent systems). 
Readers interested in a formal definition and taxonomy of natural language reasoning—grounded" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.101, + 0.905, + 0.744 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.752, + 0.885, + 0.8 + ], + "angle": 0, + "content": "Figure 3: Taxonomy of LLM reasoning research organized in this survey by regimes (inference scaling, learning to reason) and architectures (standalone LLM, single-agent, multi-agent). Each leaf node includes examples from the literature that focus on the corresponding category." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.827, + 0.884, + 0.859 + ], + "angle": 0, + "content": "in philosophical foundations—may refer to Yu et al. (2024a), which focuses specifically on this direction and is complementary to our scope." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.865, + 0.884, + 0.926 + ], + "angle": 0, + "content": "Improvements in LLM reasoning are closely tied to advancements in a variety of techniques. Dong et al. (2024) present a comprehensive survey on in-context learning (ICL), while Zhou et al. (2024c) explore the interpretation and analysis of ICL from both theoretical and empirical perspectives. In contrast, our work organizes ICL techniques under different regimes—standalone LLMs, single-agent, and multi-agent" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.226 + ], + "angle": 0, + "content": "systems—highlighting how these techniques evolve and interact within each setting. Recent studies suggest that enhancements in reasoning are often linked to inference scaling. Dong et al. 
(2024) provide an extensive review of inference-time self-improvement, and Welleck et al. (2024) offer a survey focused on three key themes: token-level generation algorithms, meta-generation algorithms, and efficient generation. Following the release of Reasoning Language Models (RLMs) such as OpenAI's o1 and DeepSeek's R1, there has been a significant increase in research dedicated to learning-to-reason approaches. Zeng et al. (2024) and Xu et al. (2025d) provide thorough surveys on these emerging developments. However, these surveys primarily focus on LLMs, and do not address agentic or multi-agent reasoning settings in depth." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.232, + 0.884, + 0.34 + ], + "angle": 0, + "content": "Research on LLM reasoning has predominantly centered on logical and mathematical reasoning. Liu et al. (2025a) offer a comprehensive survey of logical reasoning in LLMs, delving into its theoretical foundations and associated benchmarks. In their position paper, Yang et al. (2024d) underscore the pivotal role of formal mathematical reasoning, showcasing its superiority over traditional NLP-based methods in generating verifiable proofs and automated feedback. Their work outlines progress in theorem proving and auto-formalization while identifying key challenges that remain. While we cover domain-specific reasoning in Section 6.1.3, we refer readers to Liu et al. (2025a) and Yang et al. (2024d) for a more in-depth treatment of these topics." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.345, + 0.884, + 0.436 + ], + "angle": 0, + "content": "Reasoning is a critical capability in agentic systems (Pezeshkpour et al., 2024; Masterman et al., 2024). While numerous reviews focus on agent systems (Xi et al., 2023; Kapoor et al., 2024), discussions on reasoning within these systems remain limited. A concurrent work by Besta et al. 
(2025) introduces a comprehensive and modular framework for RLMs that systematically organizes key components such as reasoning structures, strategies, benchmarks and learning algorithms. However, their work does not delve into agentic and multiagent LLM systems.1" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.444, + 0.882, + 0.61 + ], + "angle": 0, + "content": "This survey provides a comprehensive overview of major milestones in LLM reasoning research, emphasizing two key dimensions: (1) the evolution of learning schemes—from inference scaling to learning-to-reason approaches—and (2) architectural advancements—from single LLMs to multi-agent systems. These dimensions summarize recent progress and lay the groundwork for future reasoning LLMs and agentic systems. We unify techniques under input and output perspectives, clarifying what must be customized or designed when building reasoning systems. Additionally, we detail essential techniques, including a comparison of the latest learning algorithms (e.g., RL) and an in-depth discussion of refiners and verifiers, which are critical for facilitating reasoning. Given these contributions, our survey is timely, offering AI researchers up-to-date insights into the field. We anticipate further research along these dimensions, such as agent-human regimes (Liang et al., 2024) and automated workflow design architectures (Hu et al., 2025; Zhang et al., 2024c; Zhou et al., 2025a)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.629, + 0.258, + 0.647 + ], + "angle": 0, + "content": "2 Background" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.662, + 0.79, + 0.679 + ], + "angle": 0, + "content": "In this section, we introduce foundational concepts that will be utilized throughout the paper." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.695, + 0.317, + 0.71 + ], + "angle": 0, + "content": "2.1 Problem Formulation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.722, + 0.882, + 0.783 + ], + "angle": 0, + "content": "LLM reasoning is often formulated within the Markov Decision Process (MDP) framework (Bellman, 1958), treating reasoning as a sequential decision-making process. While many of the terminologies in LLM reasoning originate from the AI agent and reinforcement learning (RL) literature (Russell & Norvig, 2010), their meaning in LLM reasoning can sometimes differ to suit the nature of LLM-based reasoning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.799, + 0.884, + 0.89 + ], + "angle": 0, + "content": "Reasoning step and thought The definition of what makes a reasoning step can vary depending on the specific inference or learning algorithm used, and it often depends on the granularity at which rewards (or feedback) are considered. Generally, a reasoning step can be expressed as a sequence of tokens \\(a_{t} = (x_{t_{1}},\\ldots ,x_{t_{K}})\\), where \\(x_{t_k}\\) is the \\(k\\)-th token at inference step \\(t\\). Typically, \\(a_{t}\\) represents a coherent step in reasoning (Lightman et al., 2024), such as a logical deduction or an intermediate conclusion. However, in extreme cases, a reasoning step can be the entire response (Zhang et al., 2024b; DeepSeek-AI et al., 2025)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.899, + 0.884, + 0.926 + ], + "angle": 0, + "content": "To avoid redundancy with existing literature, we do not include an analysis of reasoning benchmarks in this survey. For a detailed discussion of benchmarks, we direct readers to Xu et al. (2025d); Besta et al. (2025)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.102, + 0.88, + 0.224 + ], + "angle": 0, + "content": "
SymbolName/terminologyExplanation
atAction/responseThe reasoning step or action taken at time step t , where t ∈ {1,2,...,T}
stState/contextst := (q, a1, ..., at-1), where q is the prompt/question.
RReward model/verifierEvaluates the reasoning quality of action at state st, providing feedback.
rtRewardrt := R(st, at), reward given by verifier at time step t.
τTrajectoryτ := ((s0, a0, r0), ..., (sT, aT, rT)), The entire reasoning process leading to an answer.
πPolicy model/reasonerat ~ π(at|st): The reasoning strategy that maps a reasoning state to the next reasoning step.
VValue ModelEstimates the expected future reasoning quality from state st.
FRefinera′t = F(st, at, rt): Modifies or refines the action based on feedback from the verifier.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.254, + 0.233, + 0.744, + 0.249 + ], + "angle": 0, + "content": "Table 1: An overview of symbols and terminologies for convenience." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.275, + 0.884, + 0.322 + ], + "angle": 0, + "content": "or a single token (Schulman et al., 2017; Ouyang et al., 2022).2 The term Thought generally refers to the sequence of reasoning steps (i.e., reasoning trajectory) that occur from the question (excluding the question itself) to the final answer (excluding the final answer)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.337, + 0.885, + 0.472 + ], + "angle": 0, + "content": "Reasoning as MDP An MDP is a general framework for modeling environments where an agent makes sequential decisions by observing states and receiving rewards for its actions. The state-action-reward trajectories in an MDP can be formally expressed as: \\(\\tau = ((s_0, a_0, r_0), \\ldots, (s_T, a_T, r_T))\\), where \\(T\\) is the trajectory length. Naturally, LLM reasoning can be framed as an MDP, as each reasoning step builds upon previous ones to arrive at a final answer \\((s_T)\\) from a question \\((s_0)\\). However, a key distinction lies in how the state transition function \\(P(s_{t+1} | s_t, a_t)\\) is defined. In traditional MDPs, state transitions are driven by the environment (unknown to the agent). In LLM reasoning, this depends on the system architecture: in standalone LLMs, the model itself generates the next state, whereas in agentic systems, state transitions can be influenced by external tools within the environment." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.48, + 0.884, + 0.496 + ], + "angle": 0, + "content": "In RL-based approaches, the goal is to maximize the reasoning quality measured by the cumulative reward:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.514, + 0.884, + 0.556 + ], + "angle": 0, + "content": "\\[\n\\max \\mathbb {E} _ {\\tau \\sim P (\\tau | s _ {0}, \\pi)} \\left[ \\sum_ {t = 1} ^ {T} r _ {t} \\right], \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.562, + 0.885, + 0.668 + ], + "angle": 0, + "content": "where \\(\\pi\\) is the reasoning policy and \\(r_t = \\mathcal{R}(s_t, a_t)\\) is the reward given by the reward function \\(\\mathcal{R}\\) at time step \\(t\\). There are two primary approaches to optimize Equation 1. The first is via training, which involves optimizing model parameters to learn the optimal policy \\(\\pi\\) through methods like preference learning (e.g., DPO (Rafailov et al., 2023)) or reinforcement learning (e.g., PPO (Schulman et al., 2017)). The second is inference-scaling, which optimizes Equation 1 without altering model parameters. Instead, it employs a form of \"search\" with a frozen model, often guided by a reward model (Zhang et al., 2025b). We summarize key terminologies in Table 1." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.684, + 0.5, + 0.701 + ], + "angle": 0, + "content": "2.2 Key Components of LLM Reasoning Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.712, + 0.884, + 0.818 + ], + "angle": 0, + "content": "An LLM-based reasoning system may contain three key components depending on the reasoning regime and system architecture: (a) A Reasoner that generates the reasoning steps, serving as the policy model; (b) Verifiers that evaluate the correctness of the final outcome and/or reasoning steps, serving as reward functions; and (c) A Refiner that improves reasoning trajectories by refining responses based on the feedback from the verifier. 
Figure 4 shows a depiction of these components. While these components play complementary and important roles in a reasoning system, they can be implemented by the same LLM, e.g., self-refinement (Saunders et al., 2022; Madaan et al., 2024) unifies them." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.833, + 0.884, + 0.88 + ], + "angle": 0, + "content": "Reasoner The reasoner generates reasoning steps based on the current state of the reasoning process. It takes as input the previous states and outputs the next response or action. As the core component of a reasoning system, it determines how reasoning progresses and influences the final outcome." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.887, + 0.884, + 0.926 + ], + "angle": 0, + "content": "2Although RLHF (Reinforcement Learning from Human Feedback) methods (Ouyang et al., 2022) receive rewards based on the final answer (outcome level), the underlying RL algorithms operate as multi-step RL at the token level. This differs from approaches like DeepSeek-R1 (DeepSeek-AI et al., 2025), which employs one-step RL for training." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.307, + 0.102, + 0.693, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.36, + 0.887, + 0.452 + ], + "angle": 0, + "content": "Figure 4: Three key components of a reasoning system. The Reasoner proposes new responses (usually accompanied with rationales) for a query. The Verifier takes as input a verification instruction (e.g., what aspects to evaluate) and the response(s) from the reasoner, then outputs a judgment on the response(s) (often in the form of a numeric score or relative order, and typically accompanied by a natural language critique or rationale for its judgment). 
The Refiner, unlike the first two, takes as input an incorrect response and optionally the critique (as provided by the verifier) and outputs a revised response." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.477, + 0.885, + 0.57 + ], + "angle": 0, + "content": "**Verifier** The verifier assesses the quality of the final answer or intermediate reasoning steps and provides feedback to the reasoner. Verifiers can be outcome-level, where only the outcome is evaluated, or process-level, where intermediate reasoning steps are also evaluated. The type of feedback can range from a scalar reward (e.g., correct/wrong answer on a math problem or pass/fail for code test case) to natural language explanations. When ground-truth is available (e.g., during training), the verifier can be implemented using rule-based functions (e.g., string matching) or by training a reward model or using an LLM-judge model." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.583, + 0.884, + 0.675 + ], + "angle": 0, + "content": "Refiner Given a feedback from the verifier, as well as a response from the reasoner, a refiner tries to improve and polish the original reasoning trajectory containing flaws. Refiners can play two important roles in reasoning. First, it can serve as a general approach to improve the performance during inference. More importantly, by providing explicit analysis, a refiner can also conduct implicit search, i.e., pointing out the obstacles in current trajectory, and offer a new perspective to compress the search space. Yet, recent studies (Qu et al., 2024a) show that is not at least easier than learning reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.69, + 0.32, + 0.707 + ], + "angle": 0, + "content": "2.3 System Architectures" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.718, + 0.885, + 0.795 + ], + "angle": 0, + "content": "Building on the three key components introduced above, in this section, we describe how these elements are organized within different system architectures to achieve effective reasoning. While the three components serve as the foundation, their integration and interaction vary across architectural paradigms. In this survey, we structure reasoning systems into three main types: standalone LLM, single-agent system, and multi-agent system. Figure 5 shows their comparison with visualizations." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.809, + 0.365, + 0.825 + ], + "angle": 0, + "content": "2.3.1 Standalone LLM Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.834, + 0.885, + 0.926 + ], + "angle": 0, + "content": "A standalone LLM system comprises a single LLM which can play the role of one or more components (we refer this as unified components) in the reasoning system. It processes an input prompt and generates final outputs, which often include rationales or reasoning steps. As an LLM, it has the capability to produce diverse rationales through sampling—a key property utilized by many advanced reasoning techniques. Importantly, a standalone LLM operates independently, without interacting with external environments or collaborating with other LLMs. 
Its decision-making is based solely on simple input-output mappings or through iterative" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.101, + 0.885, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.45, + 0.884, + 0.482 + ], + "angle": 0, + "content": "Figure 5: Three architecture types used for designing a reasoning system in the context of LLMs. highlights perspectives that the literature emphasizes for customization." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.512, + 0.884, + 0.558 + ], + "angle": 0, + "content": "sampling from the same model, where the prompt incorporates prior reasoning steps (a method known as self-contained reasoning). This self-contained nature allows the LLM to function autonomously while maintaining coherence in its reasoning processes." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.575, + 0.499, + 0.591 + ], + "angle": 0, + "content": "2.3.2 From Standalone LLM to Language Agents" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.602, + 0.884, + 0.816 + ], + "angle": 0, + "content": "While the concept of an agent has been a long-standing idea in AI (Russell & Norvig, 2010), the notion of language agents has gained prominence alongside recent advancements in LLMs. The key distinction between an agent and a standalone LLM lies in two advanced capabilities: interactivity (Weng, 2023; Yao & Narasimhan, 2023) and autonomy (Xi et al., 2023; Wang et al., 2024d). Interactivity refers to an agent's ability to engage with the external world, including environments or other agents. This capability is crucial because LLMs, while powerful, often have limited knowledge and reasoning abilities confined to their internal memory. 
By enabling interaction with the external world, an LLM can augment its internal knowledge with external information, significantly expanding its understanding and grounding its outputs in real-world observations. Autonomy, on the other hand, refers to an agent's ability not only to follow human instructions but also to independently initiate and execute actions. This capability often involves planning but can extend to more complex behaviors. For instance, a fully autonomous agent should be capable of detecting novel situations, proactively taking initiative, and determining effective interaction strategies without explicit human guidance. These advanced capabilities distinguish LLM-based agents from standalone LLMs, enabling them to operate more dynamically and adaptively in real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.821, + 0.884, + 0.898 + ], + "angle": 0, + "content": "To delineate the boundary between the agent and its environment, we employ the concept of controllability (Sumers et al., 2024). Specifically, the environment is defined as an external module that the agent cannot modify. For example, a knowledge base containing resources like Wikipedia or a compiler is considered part of the environment because the agent cannot alter it. Similarly, another LLM acting as a judge or verifier is also treated as part of the environment, as its outputs operate independently of the agent. In contrast," + }, + { + "type": "page_footnote", + "bbox": [ + 0.131, + 0.911, + 0.768, + 0.925 + ], + "angle": 0, + "content": "3In this survey, the terms agent and LLM-based agent are used interchangeably unless stated otherwise." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.135 + ], + "angle": 0, + "content": "components like working memory or prompts that the agent can directly modify are not classified as part of the environment." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.142, + 0.885, + 0.219 + ], + "angle": 0, + "content": "In this work, we adopt the perspective of Kapoor et al. (2024), which conceptualizes agentiness as a spectrum. The more interactiveness and autonomy an LLM exhibits, the more agentic it is considered to be. In the upper right of Figure 5, we illustrate this spectrum visually. Within this spectrum, we define a system with agent-environment interaction as a single-agent system and a system that additionally incorporates agent-agent communication as a multi-agent system." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.234, + 0.334, + 0.25 + ], + "angle": 0, + "content": "2.3.3 Single-agent Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.26, + 0.884, + 0.352 + ], + "angle": 0, + "content": "Given the definitions above, the interaction between the agent and its environment is a central aspect of single-agent systems. These interactions can vary widely in complexity and design. In Figure 5, we illustrate a single-agent system in the bottom left. The focus here is on designing the agent's actions—such as tool use, retrieval, or answer refinement—and obtaining useful perceptions from the environment, which may include feedback from an external verifier or compiler, or data from a knowledge base (KB). This architecture enhances the LLM's capabilities by enabling it to dynamically engage with and adapt to external contexts." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.358, + 0.884, + 0.421 + ], + "angle": 0, + "content": "While a fully autonomous agent should ideally learn to interact with the environment automatically, the literature identifies several predefined interaction patterns (also referred to as workflows (Schluntz & Zhang, 2024)) that have proven effective. We elaborate on these patterns below and, in Sections 3.2 and 5.2, explore specific techniques that leverage them to improve agent performance." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.427, + 0.882, + 0.593 + ], + "angle": 0, + "content": "- Generator-evaluator pattern. This pattern divides the reasoning capability into two distinct components: a generator and an evaluator (e.g., a verifier or other evaluators like compilers). It represents a natural extension of RL-style optimization and has gained popularity since the introduction of RLHF (Ouyang et al., 2022). In this setup, the evaluator functions as the environment, providing feedback on the quality of the agent's actions. Such feedback is particularly valuable for guiding the search for effective actions and improving decision-making. Recent studies have demonstrated that verifiers can significantly enhance the performance and generalization capabilities of agents (Zhang et al., 2024i; Sun et al., 2024c). However, this pattern is not without its challenges. It can suffer from unreliable components and error propagation. For instance, Kim et al. (2024d) points out that verifiers are vulnerable to reward hacking, where the reasoner exploits loopholes in the verifier to achieve higher reward scores, ultimately degrading the overall performance of the agentic system." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.6, + 0.882, + 0.738 + ], + "angle": 0, + "content": "- Generator-critic-refiner pattern This pattern divides reasoning capabilities into three components: a reasoner, a critic, and a refiner. 
The critic acts as the environment, providing feedback—typically in the form of guidance on how to correct errors in the generated actions. The refiner then takes the flawed actions and the critic's feedback as input, producing revised and improved actions. This pattern enables the agentic system to benefit from iterative feedback, making it particularly effective for complex tasks where the initial outputs of the reasoner are suboptimal. However, it may also lead to a phenomenon known as 'over-refinement' (Chen et al., 2024b), where the agent iterates excessively, leading to diminishing returns or even degraded performance rather than improvement. Careful design and balancing of the refinement process are essential to mitigate this risk and ensure the pattern's effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.753, + 0.33, + 0.768 + ], + "angle": 0, + "content": "2.3.4 Multi-agent Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.779, + 0.882, + 0.901 + ], + "angle": 0, + "content": "In addition to the agent-environment loop in single-agent systems, multi-agent systems introduce an additional agent-agent loop, where multiple agents interact and influence one another. In this framework, agents assume different roles, exchange messages, and collaboratively coordinate their actions while operating within a shared environment.4 Figure 5 shows an example multi-agent system. It involves \\( N \\) agents (often playing distinct roles) and \\( M \\) rounds of communication through message exchanges. The focus is on designing effective communication protocols (e.g., debates) and coordinating the agents' actions to determine a final decision or action within the environment (e.g., employing an additional judge to adjudicate final actions). 
The following communication patterns have emerged as effective predefined strategies:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.131, + 0.911, + 0.794, + 0.925 + ], + "angle": 0, + "content": "4We use message to denote agent-agent communication and action to denote agent-environment interaction." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.269, + 0.1, + 0.711, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.303, + 0.884, + 0.425 + ], + "angle": 0, + "content": "Figure 6: Inference-time and training-time regimes of a reasoning system. We use tree search as an example to illustrate the inference scaling and trajectories collection. Given a query, inference scaling relies on extensive inference computation to improve the reasoner's distribution. Specifically, it generates multiple candidate reasoning steps at each layer and selects the best solution to proceed (e.g., by using an external verifier or assembling). In contrast, learning to reason focuses on collecting trajectories and training from the collected data with minimal inference-time computation. It takes all trajectories in the process (identical to those used in inference-scaling, allowing us to reuse the same tree) and labels them with preferences. The preference data can then be used to train the reasoner." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.469, + 0.884, + 0.559 + ], + "angle": 0, + "content": "- Debate pattern. In this pattern, two or more agents engage in a debate with each other. The term debate can vary in implementation. For example, in (Wang et al., 2024h), it involves agents addressing the problem independently and incorporating other agents' responses as additional advice. In (Liang et al., 2023b), it means agents approach the problem from opposing perspectives. 
After the debate, a consensus is reached through mechanisms such as an additional judge, weighted voting, or a fixed number of iterations, ultimately determining the collective action to be taken in the environment." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.567, + 0.884, + 0.643 + ], + "angle": 0, + "content": "- Reconcile pattern. This pattern facilitates collaborative round-table discussions among agents, enabling them to reach a consensus through mechanisms such as voting or confidence levels. For instance, ReConcile (Chen et al., 2023c) introduce a round-table discussion framework where agents make decisions using a weighted voting system. In this process, each agent assigns a confidence level to its proposed answers, and these confidence levels are used as weights to cast votes, ultimately determining the final decision." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.469, + 0.884, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.679, + 0.304, + 0.695 + ], + "angle": 0, + "content": "2.4 Reasoning Regimes" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.714, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Orthogonal to the components and architectures discussed above, reasoning systems can operate under distinct computational regimes. Systems employing inference-time computation can refine their outputs through iterative reflection and revision or search for improved solutions by repeatedly sampling the underlying model. However, such systems must balance cost (e.g., computational resources, latency) and effectiveness (e.g., accuracy, reliability) in achieving correct solutions. The learning-to-reason paradigm addresses this tradeoff by shifting computational burdens from inference to training, learning policies from simulated reasoning processes. 
While both regimes enhance effectiveness by redistributing computational effort across training and inference, they lack the capacity to dynamically adapt resource allocation or method selection to individual problems—a limitation highlighted in recent work (Sprague et al., 2024a; Kapoor et al., 2024; Chen et al., 2024d). To bridge this gap, emerging approaches within the learning-to-reason framework focus on optimizing the reasoning process itself, jointly minimizing cost and maximizing effectiveness. This involves dynamically allocating computational resources, searching for contextually optimal methods, and training models to synergize with adaptive inference-time strategies. Figure 6 contrasts these regimes, and we elaborate on each in the sections below." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.102, + 0.88, + 0.171 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsInstruction engineeringModify instruction by human-design templateParanjape et al. (2021); Zhou et al. (2023b)
Demonstration engineeringDrawing analogy from relevant experienceWei et al. (2022b); Luo et al. (2024d)
Prompt optimizationSearch for optimized prompt (e.g., bootstrap)Xu et al. (2022); Pryzant et al. (2023)
Optimizing OutputGenerating subtasksDecompose the original task into manageable subtasksDua et al. (2022); Zhou et al. (2023a)
Exploration and searchBranch and explore multiple paths to optimize reasoning trajectoriesYao et al. (2023a); Besta et al. (2024)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.277, + 0.181, + 0.719, + 0.197 + ], + "angle": 0, + "content": "Table 2: Summary of inference scaling with standalone LLM." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.227, + 0.303, + 0.243 + ], + "angle": 0, + "content": "2.4.1 Inference Scaling" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.254, + 0.885, + 0.452 + ], + "angle": 0, + "content": "Inference scaling techniques enhance reasoning capabilities during test time by increasing the amount of computation performed before generating an answer. These methods can be broadly categorized into three key strategies: (a) Prompt engineering and optimization, which focuses on constructing effective reasoning-provoking prompts through template-based methods, human curation, and automated optimization. (b) Search and planning methods, which include task decomposition, plan generation and verification, and exploration-based approaches. They enable structured multi-step reasoning, often involving backtracking within trees or graphs, to systematically explore potential solutions and verify their validity. (c) System-level enhancements, which incorporates external tools, knowledge sources, and verification mechanisms to augment the model's reasoning capabilities. For standalone LLMs, inference scaling primarily revolves around prompt construction and search strategies. In multi-agent settings, it further extends to include agent-agent communication and coordinated action strategies, enabling collaborative problem-solving. While these techniques have demonstrated significant effectiveness in improving reasoning performance without requiring updates to model parameters, they often come with increased computational costs during inference." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.47, + 0.318, + 0.485 + ], + "angle": 0, + "content": "2.4.2 Learning to Reason" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.497, + 0.884, + 0.589 + ], + "angle": 0, + "content": "This regime shifts the focus to training models to reason effectively before deployment, often referred to as training-time methods. The core idea is to simulate inference, generating trajectories that capture potential reasoning paths. These trajectories are then used to train the reasoner with online or offline learning methods. The methods include supervised and/or reinforcement learning. While learning-to-reason typically minimizes computational costs during inference, it incurs higher costs during simulation and training. In Section 5, we provide a detailed discussion of methods within this regime across different architectures." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.595, + 0.884, + 0.658 + ], + "angle": 0, + "content": "Recently, this paradigm has evolved to incorporate knowledge of both training and testing methods, enabling adaptive strategies. For instance, it now allows for the training of reasoners optimized for known inference techniques (Balashankar et al., 2024), or dynamically distributes computational costs between training and testing, offering a more flexible and efficient framework (Damani et al., 2025; Yue et al., 2025)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.679, + 0.543, + 0.697 + ], + "angle": 0, + "content": "3 Improving Reasoning with Inference Scaling" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.713, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Compared to small-scale models, pretrained large-scale language models (LLMs) have demonstrated emergent capabilities (Wei et al., 2022a), such as in-context learning (Dong et al., 2024) and role-playing (Shanahan et al., 2023a), which manifest without additional fine-tuning (i.e., without any gradient updates). 
Arguably, many of these abilities become apparent only after reaching a certain scale in model size. While scaling model parameters has been shown to improve reasoning performance across various tasks, the returns have diminished due to the high cost of training increasingly larger models. As a result, inference scaling has emerged as an appealing and orthogonal paradigm to unlock reasoning abilities in LLMs by providing additional test-time compute, allowing them to \"think\" before producing a final answer. It has been demonstrated that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it offers better generalization through enhanced flexibility in prompt and workflow design. Such deliberate thinking can be enabled either through training (DeepSeek-AI et al., 2025) or by explicit programming at inference time (OpenAI et al., 2024). In this section, we focus on the latter and defer training-time methods to Section 5. We begin with inference scaling methods for standalone LLMs and subsequently extend the discussion to single and multi-agent compound systems." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.104, + 0.46, + 0.12 + ], + "angle": 0, + "content": "3.1 Inference Scaling With Standalone LLM" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.133, + 0.884, + 0.181 + ], + "angle": 0, + "content": "In this section, we examine the core components and techniques that have made inference-time reasoning methods effective. Many of these methods draw inspiration from research on human cognitive processes on planning, problem solving, and decision-making (Newell et al., 1959; 1972; Stanovich & West, 2000)." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.199, + 0.502, + 0.215 + ], + "angle": 0, + "content": "3.1.1 Constructing Reasoning Provoking Prompts" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.227, + 0.885, + 0.304 + ], + "angle": 0, + "content": "Although large-scale pre-training endows LLMs with patterns that support reasoning, these capabilities often remain latent under generic prompts. Liu et al. (2025c) demonstrate that deep-reasoning behaviors—such as reflection and self-verification, which signal profound analytical thought—can be amplified simply by increasing the sampling budget. This highlights the importance of designing prompts that deliberately provoke reasoning, thereby surfacing and leveraging the latent human priors within LLMs." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.323, + 0.885, + 0.55 + ], + "angle": 0, + "content": "Instruction engineering Enabling LLMs to reason effectively depends heavily on the quality of the instructions provided (Sclar et al., 2024; Zhuo et al., 2024; Long et al., 2024a). Recognizing this, numerous prompt engineering studies aim to improve LLM reasoning by enhancing instructions. Extensive efforts in this direction primarily focus on template-based and human-curated instructions (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022; Si et al., 2023; Long et al., 2024b). With LLMs becoming increasingly adept at following human instructions and generating human-like text, focus has shifted toward leveraging the models themselves to craft and refine high-quality instructions. A notable example of this shift is the Automatic Prompt Engineer (APE) introduced by Zhou et al. (2023b), which uses LLMs to generate high-quality instructions, achieving performance comparable to or surpassing that of human annotators on 31 reasoning tasks. Furthermore, other studies have proposed methods to modify instructions for improved reasoning. For instance, Deng et al. (2023a) and Mekala et al. 
(2024) present Rephrase-and-Response and EchoPrompt, respectively, two simple yet effective strategies where LLMs are instructed to rephrase queries before answering, significantly enhancing LLM performance on reasoning tasks. Similarly, Tian et al. (2023) introduce R3 prompting, which instructs LLMs to first extract key sentences from noisy contexts, then rephrase the instruction to explicitly include extracted sentences." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.57, + 0.884, + 0.768 + ], + "angle": 0, + "content": "Demonstration engineering Humans can address new problems by drawing analogy from relevant past experience (Holyoak, 2012). Inspired by this, Yasunaga et al. (2024) propose analogical prompting to guide LLMs to self-generate exemplars or knowledge relevant to the given problem as few-shot demonstrations for reasoning, outperforming hand-crafted or retrieved examples. For example, LLMs are prompted to generate a problem on calculating a third-order determinant before solving the given fourth-order determinant. Similarly, Chen et al. (2023d); Yang et al. (2023a); Luo et al. (2024a) highlight the effectiveness of self-generated relevant exemplars. Qin et al. (2025) further systematically assess the capability of LLMs to perform analogical reasoning and find that performance is not primarily determined by whether the exemplars are topically relevant to the task. Instead, they show that even exemplars from unrelated domains, such as self-generated biological exemplars, can lead to improved performance, as long as they are accurate and structurally aligned with the reasoning steps required by the target task. This highlights that the quality of the exemplar (its correctness, clarity, and structural usefulness for reasoning) can be the key limiting factor, rather than the relevancy regarding to the topic domain." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.774, + 0.884, + 0.926 + ], + "angle": 0, + "content": "Conventionally, a fixed set of few-shot demonstrations is applied to all queries, which can be suboptimal, especially when queries vary significantly. An alternative approach is to retrieve demonstrations tailored to the current query. Research has shown that retrieval-based demonstration selection significantly improves task performance. The main goals for selecting demonstrations are similarity (Rubin et al., 2022; Agrawal et al., 2023; Li et al., 2023e; Ye et al., 2023a) and diversity (Levy et al., 2023; He et al., 2023; Kim et al., 2024a). Various retrieval strategies have been proposed for selecting \\( k \\) demonstrations, including top- \\( k \\) similarity-based retrieval (Liu et al., 2022; Li et al., 2023e), clustering-based retrieval (Luo et al., 2023c; Wang et al., 2024i), and iterative retrieval (Khattab et al., 2022; Levy et al., 2023; Wang et al., 2024e). These methods enable adaptive and effective demonstration selection, enhancing the model's reasoning and generalization across diverse queries." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.272 + ], + "angle": 0, + "content": "In addition, many-shot in-context learning has emerged as a complementary line of work, where hundreds or even thousands of demonstrations are provided to significantly enhance the performance of LLMs, especially on complex reasoning tasks (Li et al., 2023c; Agarwal et al., 2024; Zou et al., 2024; Gu et al., 2025). Many-shot prompting can be seen as an extreme form of demonstration engineering, where the focus is on scaling the quantity of demonstrations to maximize the model's capacity to learn from in-context examples. 
However, the effectiveness of many-shot ICL is often limited by the high cost of obtaining a large number of labeled demonstrations. To mitigate this gap, Chen et al. (2025) recently introduce MAPLE, a novel influence-based many-shot ICL framework that identifies impactful unlabeled samples, pseudo-labels them by querying LLMs, and adaptively selects them for each test query. This approach effectively enhances many-shot ICL performance with minimal labeling cost, demonstrating improved adaptability and reasoning capabilities of LLMs." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.289, + 0.885, + 0.533 + ], + "angle": 0, + "content": "Prompt optimization Prompt optimization methods, aiming to systematically and strategically optimize prompts for improved performance, have been extensively explored for enhancing LLM reasoning. For instance, Xu et al. (2022) introduce Genetic Prompt Search (GPS), leveraging genetic algorithms to search for the best instruction. Similarly, Guo et al. (2024a) and Fernando et al. (2024) employ evolutionary algorithms to iteratively refine instructions, while Long et al. (2024c) introduce a minimax-game framework, inspired by Generative Adversarial Networks (Goodfellow et al., 2014) to simultaneously optimize instructions and demonstrations. Furthermore, Pryzant et al. (2023) present the concept of \"text gradients\" which leverage feedback from prompt executions and LLMs to update prompts, akin to Optimization by PROempting (OPRO) (Yang et al., 2024c), which uses execution feedback. Despite these advances, the interplay between various prompt optimization algorithms remains underexplored. Recently, Wan et al. (2024a) conducted a comprehensive evaluation of representative techniques for instruction and demonstration optimization, examining their effectiveness in isolation and combination across a range of challenging tasks. 
Their findings indicate that intelligently reusing samples from prompt evaluations as demonstrations consistently enhances performance, that demonstration selection strategies can have a greater impact than instruction optimization techniques, and that a synergistic combination of demonstration and instruction optimization can outperform their individual contributions." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.551, + 0.593, + 0.567 + ], + "angle": 0, + "content": "3.1.2 Optimizing Reasoning Output with Search and Planning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.578, + 0.884, + 0.716 + ], + "angle": 0, + "content": "Generating reasoning subtasks Human problem-solving often involves planning manageable steps that lead to a successful resolution (Dostál, 2015). Likewise, improving LLM reasoning by breaking down complex problems into intermediate steps has become a successful paradigm. In this context, subtasks refer to the decomposed parts of a problem, structures are the frameworks guiding the reasoning process, and intermediate steps are intermediate results produced at each stage of problem-solving. Nye et al. (2021) and Wei et al. (2022b) pioneer this direction by proposing Chain-of-Thought (CoT) prompting which uses a few demonstrations with human-written intermediate steps to guide the model in solving complex problems in a similar style. Kojima et al. (2022) further simplified this approach by introducing zero-shot CoT prompting, which eliminates the need for demonstrations by instructing models to \"think step by step\" before answering." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.721, + 0.882, + 0.829 + ], + "angle": 0, + "content": "Simple CoT prompting often struggles as task complexity increases, particularly when the task surpasses the complexity of the provided demonstrations. 
To address this, researchers have proposed methods that explicitly guide models in decomposing tasks into subtasks, thereby enhancing intermediate step reasoning. Dua et al. (2022) propose an iterative approach, where tasks are progressively broken down into simpler subtasks and solved step-by-step. Similarly, Zhou et al. (2023a); Khot et al. (2023) and Suzgun & Kalai (2024a) advocate for a \"divide-and-conquer\" strategy, where tasks are first divided into subtasks and then solved sequentially." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.834, + 0.884, + 0.926 + ], + "angle": 0, + "content": "Beyond subtasks, researchers emphasize the importance of robust reasoning structures such as hierarchical and decision-making processes that capture the underlying mechanisms involved in problem-solving. Zhou et al. (2024b) introduce Self-Disccover, a framework that enables models to self-identify reasoning structures for any task using a seed set of general reasoning skill modules. Building on this, Aswani et al. (2024) propose Auto-Evolve, which dynamically adapts reasoning modules to accommodate more diverse problems. In addition to designing better reasoning steps, several studies address the need to correct intermediate" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.102, + 0.88, + 0.145 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
Feedback RefinementVerifier and ReflectionUse verifiers to select, modify, or refine actionsSnell et al. (2025); Madaan et al. (2023b)
Action EnhancementRetrieval and ToolAccess external knowledge and specialized resourcesLi et al. (2024e); Ma et al. (2024a)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.268, + 0.156, + 0.726, + 0.171 + ], + "angle": 0, + "content": "Table 3: Summary of inference scaling with single-agent system" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.199, + 0.884, + 0.245 + ], + "angle": 0, + "content": "steps. For example, Deng et al. (2024a); Yan et al. (2024) and Wu et al. (2024b) propose methods to refine intermediate outputs. Notably, Zhang et al. (2024i) observe that smaller models (\\(\\leq 13\\mathrm{B}\\) parameters) in particular need stronger models acting as verifiers to validate and correct intermediate steps." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.26, + 0.885, + 0.502 + ], + "angle": 0, + "content": "Exploration and search Research on human problem-solving reveals that complex reasoning tasks often admit multiple valid paths to reach a correct solution (Stanovich & West, 2000). Compared to linear reasoning structures like chain-of-thought, approaches that incorporate exploration during problem-solving have shown significant improvements for complex reasoning tasks. Unlike task decomposition methods (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023), exploration-based approaches employ dynamic search through multiple possible reasoning paths simultaneously rather than following certain decomposition patterns, enabling models to explore ambiguous solution strategies for complex problems. Exploration typically involves two key components: branching and aggregation. Due to the stochastic nature of language model decoding, branching is often implemented through independent re-sampling with non-zero temperature, generating diverse reasoning chains. Early methods, such as self-consistency (Wang et al., 2023f), introduced branching only at the beginning of the reasoning chain, conditioned on the initial query. 
While simple, this approach lacks local exploration of intermediate reasoning steps, has limited applicability for tasks with multiple valid answers, and produces reasoning chains with restricted diversity (Chen et al., 2024d). More recent advancements, such as Tree-of-Thoughts (Yao et al., 2023a), Graph-of-Thoughts (Besta et al., 2024), and Forest-of-Thoughts (Bi et al., 2024), enable finer-grained branching by considering both the query and a history of previous thoughts or thought-state sequences, allowing for more nuanced and flexible exploration." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.51, + 0.884, + 0.616 + ], + "angle": 0, + "content": "The effectiveness of branched reasoning paths with thoughts or answers depends on aggregation or evaluation strategies. Recent progress is centered around two categories: ensemble-based methods and verifier-based methods. Ensemble-based methods have been widely employed due to their simplicity and self-contained nature, requiring no external knowledge or sources for validation. These approaches typically employ strategies such as majority voting across answer tokens (Wang et al., 2023f; 2024a; Li et al., 2024b) or confidence-based selection (Wang & Zhou, 2024). Verifier-based methods, in contrast, employ external verifiers or judges to score and select preferred answers among candidate solutions." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.633, + 0.488, + 0.649 + ], + "angle": 0, + "content": "3.2 Inference Scaling With Single-agent System" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.66, + 0.884, + 0.812 + ], + "angle": 0, + "content": "LLMs are trained on static, finite datasets, which inherently limits their parametric knowledge. This limitation hinders their ability to reason effectively in scenarios requiring up-to-date or highly specialized knowledge. 
The use of an agentic system, where LLMs are augmented with external verifiers, retrieval and tool integration, has proven effective in such scenarios. Verifiers provide reasoners with a signal of the quality of their outputs (e.g., a score or natural language feedback), which may be used by reasoners to modify or improve their outputs. Retrieval augmentation improves reasoning by enabling the agent to access relevant external knowledge, thereby reducing hallucinations and ensuring more accurate, fact-based responses. Additionally, the agent can achieve higher performance by leveraging specialized external tools to handle specific intermediate reasoning steps. For instance, allowing an agent to use a calculator can minimize errors stemming from inaccuracies in numerical generation." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.819, + 0.884, + 0.926 + ], + "angle": 0, + "content": "A pioneering approach in this domain is the ReAct framework (Yao et al., 2023b), which interleaves reasoning and acting by prompting LLMs to generate both reasoning traces and task-specific actions in an interleaved manner. This synergy allows the model to induce, track, and update action plans while interfacing with external sources (environment) to gather additional information. ReAct has demonstrated effectiveness across QA and interactive decision-making tasks. Building upon ReAct, LATS (Zhou et al., 2024a) unifies reasoning, acting, and planning within LLMs. By combining Monte Carlo Tree Search with ReAct, LATS enables structured search over a combinatorial space of reasoning and acting paths. More recently, Liu et al." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.104, + 0.884, + 0.135 + ], + "angle": 0, + "content": "(2024f) formalize reasoning and acting with LLMs under a Bayesian adaptive MDP and propose RAFA, a theoretically grounded framework for orchestrating the reasoning and acting of LLMs." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.149, + 0.483, + 0.165 + ], + "angle": 0, + "content": "3.2.1 Refinement with Verifiers and Reflections" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.175, + 0.885, + 0.298 + ], + "angle": 0, + "content": "A natural basis for modifying agent actions is the quality of their generated outputs—if the output is incorrect, the agent should attempt to correct it. However, ground-truth references are typically unavailable to the agent at test time. In such scenarios, agents often rely on verifiers, which are models or systems that provide an approximate measure of correctness, to guide action modifications. A special case arises when the verifier has access to ground-truth outcomes. Oracle verifiers (First et al., 2023; Xin et al., 2024a), which leverage correct answers, have shown significant performance improvements over baselines without verifiers (Huang et al., 2024a; Brown et al., 2024). However, their applicability is limited to scenarios where ground-truth data is readily available or easily accessible, such as in games or structured environments." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.303, + 0.885, + 0.41 + ], + "angle": 0, + "content": "In contrast, non-oracle (or imperfect) verifiers provide a more widely applicable solution. Their form varies depending on the task and knowledge source. For instance, Cobbe et al. (2021); Feng et al. (2023b); Snell et al. (2025) employ trained outcome reward models (ORMs) as verifiers to rerank responses. For more granular evaluation, Lightman et al. 
(2024) and Zhang et al. (2025b) train process reward models (PRMs) to serve as inference-time verifiers. By enabling the reward model to assess each reasoning step individually, PRMs generally yield greater improvements during inference compared to ORMs (Uesato et al., 2022; Tian et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.416, + 0.884, + 0.598 + ], + "angle": 0, + "content": "While reward models provide actionable signals about the quality of model responses, they are non-generative verifiers. As a result, they are unsuitable for verification approaches that require natural language feedback. For instance, synthesizing unit tests (Chen et al., 2023b; Hassid et al., 2024; Kapoor et al., 2024; Cook et al., 2024), commonly used in code generation tasks, necessitates verifiers capable of generating natural language. Broadly, generative verifiers are referred to as either critique models or LLM-as-judge models. In both cases, LLMs are either prompted or fine-tuned specifically for critique and evaluation. These models have been employed not only for output reranking (Vu et al., 2024) but also for providing valuable natural language feedback (Shinn et al., 2024; Shridhar et al., 2024; McAleese et al., 2024). However, recent studies have found that LLM-as-judge models generally underperform reward models (RMs) in terms of verification (Zhang et al., 2024e). To address this, researchers have sought to combine the strengths of both approaches under the Generative RM framework (Zhang et al., 2024e; Mahan et al., 2024; Liu et al., 2025b), aiming to unify the advantages of generative feedback with the precision of reward-based evaluation." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.605, + 0.884, + 0.697 + ], + "angle": 0, + "content": "Self-reflection or self-refinement approaches (Saunders et al., 2022; Madaan et al., 2024) aim to eliminate the need for additional, specialized verifier models by enabling the agent to critique and refine its own outputs. While some studies (Saunders et al., 2022; Madaan et al., 2024) have demonstrated empirical success, others highlight poor performance in the absence of robust verifiers (Stechly et al., 2023; Huang et al., 2024a; Stechly et al., 2024; Valmeekam et al., 2023; Shridhar et al., 2024). For a comprehensive review of recent advancements, see (Pan et al., 2024b)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.703, + 0.883, + 0.765 + ], + "angle": 0, + "content": "While verification methods can be deployed across a wider range of domains, they are susceptible to false positives—incorrect solutions that nevertheless pass verification. This limitation becomes particularly relevant when scaling up inference compute, as it can lead to diminishing returns on computational investment. Interested readers can refer to (Stroebl et al., 2024) for a comprehensive analysis of these trade-offs." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.779, + 0.566, + 0.794 + ], + "angle": 0, + "content": "3.2.2 Enhancement through Retrieval and Tool Utilization" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.805, + 0.884, + 0.926 + ], + "angle": 0, + "content": "During the reasoning process, agents can retrieve external knowledge to refine their internal state representations, resulting in more accurate reasoning steps. The advantages of retrieval are particularly pronounced in knowledge-intensive tasks that demand multi-hop and long-horizon reasoning, where connecting multiple pieces of information is essential to arrive at a final answer. 
Through retrieval, agents can access intermediate information, verify connections between data points, and integrate them into their reasoning process (Shi et al., 2024; Jiang et al., 2024b; Wang et al., 2024m). Retrieval also addresses critical flaws in LLMs, such as hallucination and factual inaccuracies. By grounding responses in retrieved facts, models are less prone to generating erroneous information and more likely to produce reliable and trustworthy outputs. For" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.102, + 0.88, + 0.171 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
DesigningDecentralizedNo hierarchy among agentsChen et al. (2023c); Chang (2024)
CommunicationCentralizedPresence of a central lead agentSuzgun & Kalai (2024a); Pan et al. (2024a)
ActionConditioned generationPerform reasoning based on other agents' outputsWang et al. (2024c); Gao et al. (2024b)
CoordinationDynamic adaptationAdapt actions based on specific tasksFourney et al. (2024); Yuan et al. (2024c)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.273, + 0.18, + 0.723, + 0.196 + ], + "angle": 0, + "content": "Table 4: Summary of inference scaling in multi-agent systems." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.223, + 0.884, + 0.375 + ], + "angle": 0, + "content": "instance, frameworks such as Verify-and-Edit (Zhao et al., 2023) and Chain-of-Knowledge (Li et al., 2024e) dynamically incorporate structured and unstructured knowledge sources to revise and correct intermediate reasoning steps within a reasoning chain. CRP-RAG (Xu et al., 2024b) improves multi-hop reasoning by dynamically adjusting reasoning paths and aggregating relevant knowledge. SelfRewardRAG (Hammane et al., 2024) enhances medical reasoning by combining RAG with self-evaluation, dynamically retrieving and synthesizing up-to-date medical information to ensure accurate response generation. By leveraging real-time data, such as clinical records from PubMed, it ensures responses are both current and precise. Another example is Think-on-Graph (Sun et al., 2023), a retrieval framework that integrates knowledge graphs (KGs) and text retrieval to deepen and refine reasoning in LLMs. GRATR (Zhu et al., 2024b) applies RAG techniques to enhance reasoning in multiplayer games with incomplete information." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.382, + 0.884, + 0.489 + ], + "angle": 0, + "content": "In addition to search and retrieval, agents can utilize other specialized tools to overcome their inherent limitations and significantly enhance reasoning performance. By integrating tools such as calculators, compilers, calendars, or specialized APIs, agents can access domain-specific resources, enabling them to operate more effectively in targeted applications (Yu et al., 2023b; Lu et al., 2024a; Li et al., 2025a). 
For instance, SCIAGENT (Ma et al., 2024b) leverages domain-specific tools like SymPy and WolframAlpha to enhance the reasoning capabilities of LLMs in scientific domains. Similarly, FinAgent (Zhang et al., 2024g) combines textual, numerical, and visual tools to improve performance in financial trading tasks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.495, + 0.884, + 0.586 + ], + "angle": 0, + "content": "Moreover, external tools provide precise computational capabilities, allowing LLMs to transcend their limitations and perform complex numerical tasks with higher accuracy (Chen et al., 2023e; Li et al., 2023a). For example, MATHSENSEI (Das et al., 2024) employs tools such as Python, WolframAlpha, and Bing Search to tackle mathematical reasoning tasks across disciplines like algebra and calculus. TART (Lu et al., 2024b) integrates LLMs with tools for precise table-based reasoning tasks, such as table question answering and fact verification." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.593, + 0.884, + 0.701 + ], + "angle": 0, + "content": "Moreover, Anthropic introduced an open standard of Model Context Protocol (MCP) to seamlessly connect AI assistants with real-world data sources such as content repositories, business tools, and development environments. It provides a universal, scalable way for developers to create secure, two-way connections between AI tools and diverse data systems. While MCP holds significant promise, its adoption also introduces several challenges that must be addressed to support sustainable growth and responsible development. Hou et al. (2025) discussed some key issues, such as the absence of centralized security oversight, gaps in authentication and authorization, and difficulties in maintaining consistency across multi-step, cross-system workflows." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.718, + 0.49, + 0.735 + ], + "angle": 0, + "content": "3.3 Inference Scaling With Multi-agent Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.746, + 0.884, + 0.822 + ], + "angle": 0, + "content": "By strategically designing communication patterns and coordinating actions, multi-agent systems can achieve more sophisticated reasoning by harnessing the specialized capabilities of multiple agents (Guo et al., 2024b). Effective communication design involves establishing structured message exchanges and interaction patterns among agents, while action coordination focuses on reconciling diverse outputs and achieving consensus to determine the final action in the environment." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.839, + 0.438, + 0.855 + ], + "angle": 0, + "content": "3.3.1 Designing Communication Patterns" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.865, + 0.884, + 0.927 + ], + "angle": 0, + "content": "A common communication pattern in multi-agent frameworks involves engaging multiple agents in debates or discussions (Liang et al., 2023b). For instance, the RECONCILE framework (Chen et al., 2023c) requires each agent to generate an answer accompanied by an explanation and a confidence score. The agents then participate in multi-round discussions to refine their responses, and a confidence-weighted voting mechanism" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.166 + ], + "angle": 0, + "content": "aggregates the answers into a consensus. Similarly, SocraSynth (Chang, 2024) employs opposing LLM agents moderated by predefined contentiousness levels to explore diverse perspectives. 
Additionally, GroupDebate (Liu et al., 2024e) organizes agents into groups that conduct internal debates before sharing their results, reducing token costs while maintaining robust logical reasoning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.172, + 0.885, + 0.264 + ], + "angle": 0, + "content": "Besides decentralized communication, prior works also consider sending messages to a central node for decision making. For example, Suzgun & Kalai (2024b) employs a language model as a multi-faceted conductor that is good at handling and integrating various queries. Moreover, AgentCood (Pan et al., 2024a) assigns an LLM the role of a central planner for coordination strategy generation and agent assignment. Compared with decentralized communication, it can lead to more efficient resource allocation but increase the system vulnerability to potential failure of the central node." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.278, + 0.325, + 0.294 + ], + "angle": 0, + "content": "3.3.2 Coordinating Action" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.303, + 0.884, + 0.365 + ], + "angle": 0, + "content": "Effective action coordination among multiple agents is important for achieving the shared goals, especially given a dynamic and complex environment. Prior works explore various strategies which can enable agents to synergise agents' actions and optimize overall system reasoning and problem-solving performance. This approach leverages the strengths of different LLMs to overcome the limitations of individual models." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.372, + 0.884, + 0.493 + ], + "angle": 0, + "content": "One straightforward coordination strategy is chaining agents in a row, where agents can perform reasoning based on other agents' outputs. 
For example, Mixture-of-Agents (MoA) (Wang et al., 2024c) capitalizes on the cooperative nature of LLMs, allowing models to generate higher-quality responses by integrating and synthesizing contributions from multiple agents, achieving state-of-the-art performance. Similarly, Meta-Reasoning Prompting (MRP) (Gao et al., 2024b) assigns each agent to dynamically select the most effective reasoning method from a reasoning pool for a specific task, enabling the integration of diverse strategies to efficiently address multiple tasks. In addition, CoMM (Chen et al., 2024c) makes agents respond to discussions based on different role-playings." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.5, + 0.884, + 0.592 + ], + "angle": 0, + "content": "Moreover, coordination action can incorporate dynamic adaptation to task requirements. For example, Magentic-One (Fourney et al., 2024) introduces a lead agent as Orchestrator to conduct dynamic planning based on varied tasks. Gabriel et al. (2024) proposes a framework that deals with multi-hop queries, produces and executes task graphs, chooses suitable tools, and dynamically adapts to real-time changes. Additionally, EVOAGENT (Yuan et al., 2024c) dynamically generates various agents suitable for the given task and select those with high-quality outputs for result generation." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.609, + 0.332, + 0.628 + ], + "angle": 0, + "content": "4 Learning Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.642, + 0.884, + 0.718 + ], + "angle": 0, + "content": "Before delving into methodologies for training reasoning models, we first describe the foundational learning algorithms used to train the reasoner's policy and verifiers. These algorithms are defined by their precise loss functions. Note that learning algorithms are independent of the data curation process, which will be discussed in detail in Section 5. 
We begin by presenting commonly used learning algorithms for training reasoning models in Section 4.1, followed by a discussion on training verifiers in Section 4.2." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.734, + 0.318, + 0.75 + ], + "angle": 0, + "content": "4.1 Learning of Reasoner" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.761, + 0.884, + 0.793 + ], + "angle": 0, + "content": "This section is organized into three key parts: (1) imitation learning through supervised fine-tuning, (2) reinforcement learning, and (3) preference learning." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.806, + 0.5, + 0.823 + ], + "angle": 0, + "content": "4.1.1 Imitation Learning - Supervised Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.832, + 0.884, + 0.879 + ], + "angle": 0, + "content": "Supervised fine-tuning (SFT) maximizes the log probabilities of the next token \\( y_{i} \\) given the input prompt \\( x \\) and previously generated tokens \\( y_{< i} \\). Training the policy model \\( \\pi_{\\theta} \\) generally includes the steps to minimize the following loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.887, + 0.884, + 0.93 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {S F T}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ \\sum_ {i} ^ {T} - \\frac {1}{T} \\log \\left(\\pi_ {\\theta} \\left(y _ {i} \\mid y _ {< i}, x\\right)\\right) \\right], \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.885, + 0.198 + ], + "angle": 0, + "content": "where \\(\\mathcal{D}\\) is the SFT dataset that comprises inputs \\(x\\) and ground truth labels \\(y\\). The ground truth labels can be either human-written or AI-generated reasoning process and answer response. 
The loss is equivalent to the next token prediction objective where the prompt input tokens are masked out and do not contribute to the loss. SFT is the often the default first (or only) step to train a base LLM to produce reasoning chains in zero-shot settings. SFT has also popularly used as an effective way to train smaller LLMs to imitate outputs generated by larger, more powerful LLMs, in a process known as knowledge distillation (Xu et al., 2024c)." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.21, + 0.461, + 0.226 + ], + "angle": 0, + "content": "4.1.2 Reinforcement Learning for Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.235, + 0.885, + 0.314 + ], + "angle": 0, + "content": "Stiannon et al. (2020) and Ouyang et al. (2022) pioneered the application of reinforcement learning (RL), particularly proximal policy optimization (PPO) (Schulman et al., 2017), to improve not only reasoning capabilities but also the helpfulness and harmlessness of LLMs. Their work catalyzed a wave of innovations in preference learning and RL-based optimization techniques, as evidenced by subsequent studies (Rafailov et al., 2023; Ahmadian et al., 2024; OpenAI et al., 2024; DeepSeek-AI et al., 2025; Ramesh et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.327, + 0.884, + 0.358 + ], + "angle": 0, + "content": "Markov decision process. Most reinforcement learning (RL) approaches model text generation as a Markov Decision Process (MDP). 
In this framework, the process is defined by the following components:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.374, + 0.263, + 0.388 + ], + "angle": 0, + "content": "A set of states \\(S\\)" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.399, + 0.273, + 0.413 + ], + "angle": 0, + "content": "- A set of actions \\(\\mathcal{A}\\)," + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.423, + 0.726, + 0.438 + ], + "angle": 0, + "content": "- A state-action transition distribution \\( P(s_{t + 1}|s_t,a_t) \\) controlled by the environment," + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.448, + 0.616, + 0.462 + ], + "angle": 0, + "content": "- A reward function \\( R(s_{t},a_{t})\\in \\mathbb{R} \\) that provides a scalar reward, and" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.473, + 0.726, + 0.487 + ], + "angle": 0, + "content": "- A policy \\(\\pi (a_t|s_t)\\), which determines the actions to take based on the current state." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.374, + 0.726, + 0.487 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.506, + 0.885, + 0.689 + ], + "angle": 0, + "content": "At each time step \\( t \\), for a given state \\( s_t \\in S \\), the agent selects an action \\( a_t \\) and transitions to a new state \\( s_{t+1} \\), receiving a reward \\( R(s_t, a_t) \\) from the environment. The set of available actions at state \\( s_t \\) may be restricted to a subset of \\( \\mathcal{A} \\), denoted \\( \\mathcal{A}_{s_t} \\) (i.e., \\( a_t \\in \\mathcal{A}_{s_t} \\)). In the context of autoregressive language modeling with LLMs, generally the next token depends on all the previous tokens. As such, in order to apply RL training for LLMs, one needs to define the states and actions of the problem such that they both satisfy the temporal dependency constraint of the language modeling task as well as the Markov property. 
One common approach is to define that the current state \\( s_t \\) fully encapsulates all relevant information about the environment, in other words all previous tokens. This means the next state \\( s_{t+1} \\) depends solely on the current state \\( s_t \\in S \\) and the chosen action \\( a_t \\in \\mathcal{A}_{s_t} \\). In this way, the current state no longer needs to retrieve information from the previous states to decide the next action. As such, the state transition is agnostic to the history or previous states and actions. Within this MDP framework, the goal of RL is to learn a policy model that selects optimal actions by maximizing the expected cumulative rewards (Eq. 1)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.704, + 0.884, + 0.81 + ], + "angle": 0, + "content": "- Action := token: Actions are defined at the token level, making the action space \\(\\mathcal{A}_{s_t}\\) is finite and equal in size to the vocabulary. The state \\(s_t\\) consists of all preceding tokens, including the input prompt and previously generated output tokens. The next state \\(s_{t+1}\\) is defined as the concatenation of the current state \\(s_t\\) and the action taken \\(a_t\\), i.e., \\(s_{t+1} \\coloneqq [s_t; a_t]\\). This category of methods defines rewards and related measures, such as values and advantages, at the token level. Works adopting this approach include most standard RLHF methods (Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023) as well as more recent fine-grained process-rewarding approaches (Yuan et al., 2024b; Cui et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.819, + 0.884, + 0.926 + ], + "angle": 0, + "content": "- **Action := token chunk (step):** In this category of methods, actions are defined at the level of token chunks that semantically represent a reasoning step, separated by a special delimiter. As a result, the action space is infinite. 
The state \\( s_t \\) consists of the prompt and the output tokens generated in previous reasoning steps. Rewards, value scores, and advantages are computed at the step level, with all tokens within a reasoning step \\( a_t \\) sharing the same step-level score. This approach is particularly prominent in process supervision pipelines, as exemplified by DeepSeek-Math and VinePPO (Shao et al., 2024; Kazemnejad et al., 2024)." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.704, + 0.884, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.134, + 0.101, + 0.867, + 0.307 + ], + "angle": 0, + "content": "
TypeState stAction atAction spaceExample work
Action := tokenAll previous to-kens (prompt and current response tokens)one tokenfinite, vocabulary size(Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023)
Action := stepAll previous tokens of prompt and previous stepsa chunk of tokens representing a “reasoning step”, separated by a special delimiterinfinite(Shao et al., 2024) (process supervision), (Kazemnejad et al., 2024)
Action := full re-sponsePromptentire responseinfinite(Shao et al., 2024) (outcome supervision), (DeepSeek-AI et al., 2025)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.206, + 0.318, + 0.79, + 0.333 + ], + "angle": 0, + "content": "Table 5: Definitions of MDP states and actions across different training schemes." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.361, + 0.885, + 0.543 + ], + "angle": 0, + "content": "- Action := full response: In this category, the entire response—comprising all output tokens—is treated as a single action. This transforms the reasoning problem into a one-step MDP with an infinite action space. This approach has been recently popularized by DeepSeek-R1 (DeepSeek-AI et al., 2025) and previously by DeepSeek-Math (outcome supervision) (Shao et al., 2024). A unique aspect of this formulation is that the full response may semantically include multiple reasoning steps, such as spontaneous backtracking and self-evaluation behaviors, as observed in DeepSeek-R1 (DeepSeek-AI et al., 2025). Regardless of the number of humanly recognizable reasoning steps within the response, the entire output is still considered a single action. To assign token-level value scores, rewards, and advantages, Shao et al. (2024); DeepSeek-AI et al. (2025) compute these values based on the full response \\(a_{t}\\) and then distribute them uniformly across all tokens, similar to the step-level action setting. This formulation aligns with the concept of \"bandit\" prediction (with infinite action space) in REINFORCE-style RL (Nguyen et al., 2017; Kreutzer et al., 2017)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.564, + 0.884, + 0.701 + ], + "angle": 0, + "content": "Proximal Policy Optimization (PPO). As one of the primary variants of policy gradient methods, PPO has remained a popular and widely used RL algorithm (Schulman et al., 2017). 
To train the policy \\(\\pi_{\\theta}\\), PPO utilizes two additional models: the reference model \\(\\pi_{\\theta_{\\mathrm{ref}}}\\), which represents the initial state of the policy, and the value model \\(V\\), which estimates the state value \\(V(s_{t})\\). PPO begins by sampling a state-action trajectory \\(\\tau\\) with consecutive state-action pairs \\(s_{t+1} \\sim (s_{t}, a_{t})\\), then collects the respective intermediate or process reward (if available) and final (outcome) reward. Then, it computes the advantage \\(A(s_{t}, a_{t})\\) of each action \\(a_{t}\\) given the current state \\(s_{t}\\), which is defined as the relative strength of that specific action \\(a_{t}\\) compared to the probability-weighted actions that the policy could probably have taken from \\(s_{t}\\). The advantage is formulated as" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.713, + 0.884, + 0.731 + ], + "angle": 0, + "content": "\\[\nA \\left(s _ {t}, a _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - V \\left(s _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - \\mathbb {E} _ {a _ {t} ^ {\\prime}} \\left[ Q \\left(s _ {t}, a _ {t} ^ {\\prime}\\right) \\right], \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.743, + 0.885, + 0.834 + ], + "angle": 0, + "content": "where \\( Q(s_{t},a_{t}) \\) represents the expected cumulative total reward that the policy is expected to obtain if it takes action \\( a_{t} \\) from \\( s_{t} \\) and continue to follow the current policy, while \\( V(s_{t}) \\) denotes the expected total rewards obtainable from state \\( s_{t} \\), known as the state value. The state value is equivalent to the expected value of \\( Q(s_{t},a_{t}^{\\prime}) \\) marginalized over all probable actions the current policy \\( \\pi_{\\theta} \\) may take from \\( s_{t} \\). If \\( A(s_{t},a_{t}) > 0 \\), the action \\( a_{t} \\) is encouraged, conversely, if \\( A(s_{t},a_{t}) < 0 \\), the action \\( a_{t} \\) is discouraged. 
After computing the advantages, PPO optimizes the policy \\( \\pi_{\\theta} \\) according to the following loss function." + }, + { + "type": "equation", + "bbox": [ + 0.134, + 0.846, + 0.884, + 0.888 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {P P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {0}}, P} - \\frac {1}{T} \\left[ \\sum_ {t = 0} ^ {T} \\operatorname {m i n} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})} A (s _ {t}, a _ {t}), \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})}, 1 - \\epsilon , 1 + \\epsilon\\right) A (s _ {t}, a _ {t})\\right) \\right], \\tag {4}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.899, + 0.884, + 0.923 + ], + "angle": 0, + "content": "5The O-1 model series (OpenAI et al., 2024) also exhibit such behaviors, though the training approach for O-1 remains undisclosed." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.887, + 0.233 + ], + "angle": 0, + "content": "where \\( t \\in [0, T] \\) is a time step within trajectory \\( \\tau \\), \\( \\pi_{\\theta_o} \\) is the fixed policy from previous episode or iteration, and \\( P \\) is the transition distribution. The clip function, applied to the probability ratio \\( \\frac{\\pi_{\\theta}(a_t|s_t)}{\\pi_{\\theta_o}(a_t|s_t)} \\), ensures that the policy does not deviate too drastically or rapidly from its previous version. This also helps prevent catastrophic failure or suboptimal local solutions. Additionally, a KL divergence term \\( \\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta}||\\pi_{\\theta_{\\mathrm{ref}}}) \\) is often incorporated into the loss function to constrain exploration during the later stages of training. 
\\( \\pi_{\\theta_{\\mathrm{ref}}} \\) is often a fixed initial reference policy that we do not want our policy to deviate too much from, while \\( \\pi_{\\theta_o} \\) is a snapshot of the current policy from the previous iteration which is updated regularly. Throughout the training process, both the policy \\( \\pi_{\\theta} \\) and value model \\( V \\) are iteratively updated." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.244, + 0.887, + 0.293 + ], + "angle": 0, + "content": "REINFORCE & RLOO. REINFORCE is another popular policy gradient method (Sutton, 2018; Williams, 1992; Nguyen et al., 2017; Kreutzer et al., 2017) for RL. This method seeks to optimize the reward weighted objective of the entire response as:" + }, + { + "type": "equation", + "bbox": [ + 0.279, + 0.298, + 0.884, + 0.318 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {R E I N F O R C E}} (\\theta) = \\mathbb {E} _ {x \\sim \\mathcal {D}, y \\sim \\pi_ {\\theta} (\\cdot | x)} [ (R (y, x) - b) \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y | x) ] \\qquad (5)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.324, + 0.886, + 0.372 + ], + "angle": 0, + "content": "where \\( R(y, x) \\) represents the final reward for output \\( y \\) given input \\( x \\) and \\( b \\) is a baseline term introduced to reduce the variance of the gradient estimates. A widely used choice for \\( b \\) is the moving average of all rewards observed during training (Williams, 1992; Ahmadian et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.377, + 0.887, + 0.424 + ], + "angle": 0, + "content": "Recently, the REINFORCE Leave-One-Out (RLOO) method (Kool et al., 2019; Ahmadian et al., 2024) has been proposed, which replaces the traditional baseline calculation with the leave-one-out average of trajectory rewards obtained through Monte Carlo (MC) sampling, as shown in Eq. 
6" + }, + { + "type": "equation", + "bbox": [ + 0.271, + 0.434, + 0.886, + 0.477 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {R L O O}} (\\theta) = \\frac {1}{k} \\sum_ {i = 1} ^ {k} [ R (y _ {i}, x) - \\frac {1}{k - 1} \\sum_ {j \\neq i} R (y _ {j}, x) ] \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y _ {i} | x) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.483, + 0.889, + 0.545 + ], + "angle": 0, + "content": "where \\( k \\) denotes the number of Monte Carlo samples. Unlike PPO, these algorithms do not rely on a parameterized value function (critic model) and instead depend solely on observed rewards. These methods share similarities with approaches such as Group-Relative Policy Optimization (GRPO) (Ramesh et al., 2024) and VinePPO (Kazemnejad et al., 2024), which will be discussed in detail below." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.559, + 0.888, + 0.668 + ], + "angle": 0, + "content": "Group-Relative Policy Optimization (GRPO). This algorithm has gained recent popularity through DeepSeek-R1 DeepSeek-AI et al. (2025), though it was also explored in earlier studies such as (Shao et al., 2024; Yang et al., 2024b;a; Team, 2024). It employs the same clipped surrogate objective as PPO, defined in Eq. 4 (Schulman et al., 2017). However, unlike PPO, which uses a parameterized value model to estimate the advantage \\( A(s_{t},a_{t}) \\), this approach samples a group \\( G = [o_{1},o_{2},\\dots,o_{g}] \\) of Monte-Carlo outputs for a given input \\( x \\). It then computes the corresponding rewards \\( R = [r_1,r_2,\\dots,r_g] \\), and determines the advantage of each output \\( o_i \\) as the group-normalized reward" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.673, + 0.885, + 0.707 + ], + "angle": 0, + "content": "\\[\nA _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}) = A _ {\\mathrm {G R P O}} (o _ {i}) = \\frac {r _ {i} - m e a n (R)}{s t d (R)}. 
\\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.713, + 0.738, + 0.73 + ], + "angle": 0, + "content": "Then, the algorithm optimizes the policy \\(\\pi_{\\theta}\\) by minimizing the following loss function." + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.738, + 0.884, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} L _ {\\mathrm {G R P O}} (\\theta) = - \\frac {1}{| G |} \\sum_ {i} ^ {| G |} \\frac {1}{T _ {i}} \\sum_ {t} ^ {T _ {i}} m i n \\left\\{\\frac {\\pi_ {\\theta} (a _ {i , t} | s _ {i , t})}{\\pi_ {\\theta_ {o}} (a _ {i , t} | s _ {i , t})} A _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}), \\right. \\\\ \\left. \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}{\\pi_ {\\theta_ {o}} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {\\mathrm {G R P O}} \\left(s _ {i, t}, a _ {i, t}\\right) \\right\\} \\tag {8} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.823, + 0.886, + 0.856 + ], + "angle": 0, + "content": "Variants of GRPO, such as DAPO (Yu et al., 2025), have also been introduced to alleviate issues with GRPO like length bias and inappropriate penalties for responses that exceed the context length." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.869, + 0.324, + 0.885 + ], + "angle": 0, + "content": "4.1.3 Preference Learning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.894, + 0.886, + 0.927 + ], + "angle": 0, + "content": "Preference learning, particularly learning from human feedback, is a widely used post-pretraining alignment stage for LLMs. 
Its goal is to encourage the generation of responses that align with human preferences or" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.21 + ], + "angle": 0, + "content": "desired values, such as helpfulness or harmlessness (Ouyang et al., 2022; Bai et al., 2022; Ganguli et al., 2022). The data collection process for this stage typically involves prompting an unaligned LLM to generate multiple responses for a given input. Human annotators are then presented with pairs of responses and asked to select the preferred one. The resulting preference dataset is used to train a reward model. This reward model subsequently provides online reward scores for policy trajectories during PPO training, a process commonly referred to as reinforcement learning from human feedback or RLHF (Schulman et al., 2017; Ouyang et al., 2022; Touvron et al., 2023), as well as AI feedback (Lee et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.217, + 0.884, + 0.324 + ], + "angle": 0, + "content": "Preference learning has evolved beyond conventional reinforcement learning (RL)-based methodologies with the introduction of Direct Preference Optimization (DPO) (Rafailov et al., 2023) and its subsequent variants (Ethayarajh et al., 2024; Lai et al., 2024; Hong et al., 2024; Saeidi et al., 2024; Meng et al., 2024; Azar et al., 2024). DPO proposes using the policy language model itself to directly model human reward preferences from the preference dataset. This formulation eliminates the need for a separately trained reward model, instead optimizing the policy on the preference dataset with a simple binary classification loss. 
Formally, the policy \\(\\pi_{\\theta}\\) is optimized using a preference dataset \\(\\mathcal{D}\\) by minimizing the loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.246, + 0.328, + 0.884, + 0.363 + ], + "angle": 0, + "content": "\\[\nL _ {\\mathrm {D P O}} (\\theta) = - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.367, + 0.884, + 0.472 + ], + "angle": 0, + "content": "where \\( y_{w} \\) and \\( y_{l} \\) represent the winning (chosen) and losing (rejected) outputs for input \\( x \\), respectively. DPO has gained popularity due to its simplicity and stability, bypassing the engineering complexity and challenges associated with PPO-based techniques. However, DPO is not without limitations, such as implicit biases toward longer responses and performance degradation over extended training periods (Ethayarajh et al., 2024; Meng et al., 2024). Subsequent advancements, including KTO (Ethayarajh et al., 2024), iPO (Azar et al., 2024), SimPO (Meng et al., 2024), ORPO (Hong et al., 2024), Step-DPO (Lai et al., 2024), and combination methods (Saeidi et al., 2024), have addressed many of these shortcomings." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.48, + 0.884, + 0.557 + ], + "angle": 0, + "content": "While the above learning algorithms are formulated for single turn input-to-output tasks, it is also generalizable to multi-turn conversations as well as function-calling agentic workflows. 
In such scenarios, the next state \\( s_{t+1} \\) may not always be a concatenation of all previous states \\( s_{\\leq t} \\) and actions \\( a_{\\leq t} \\), but it also depends on incoming response \\( h_t \\) from an outside environment, which can come from a follow-up user instruction or the returned result from a function call. In other words, one may define \\( s_{t+1} := [s_t; a_t; h_t] \\)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.572, + 0.465, + 0.587 + ], + "angle": 0, + "content": "4.2 Learning of Verifiers and Reward Models" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.599, + 0.885, + 0.78 + ], + "angle": 0, + "content": "Verifiers play an important role in reasoning systems, improving performance both through training time credit assignment (Ouyang et al., 2022; Ziegler et al., 2019; Stiennon et al., 2020) and inference-time scaling verification (Snell et al., 2024). Reward modeling in the reasoning settings focuses on verifying the correctness of the reasoning chain, rather than evaluating using more general criteria, like helpfulness or safety (Ouyang et al., 2022). As a result, reward model training in reasoning is typically formulated as a binary classification problem between correct and incorrect reasoning steps. Based on label granularity, reward modeling is further categorized into outcome reward modeling (Section 4.2.1) and process reward modeling (Section 4.2.2). More recently, generative models for verification (Section 4.2.3) have emerged as a popular approach that produces actionable and explainable natural language feedback alongside rewards. In this section, we cover common training approaches for verifiers; In Section 6.1.3, we posit that verification itself may benefit from being studied as a reasoning problem itself, highlighting both concrete methods and recent analysis of failure modes in reasoning settings." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.794, + 0.422, + 0.81 + ], + "angle": 0, + "content": "4.2.1 Outcome Reward Models (ORM)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.82, + 0.884, + 0.866 + ], + "angle": 0, + "content": "The goal of outcome reward models (ORMs) for reasoning is to provide a scalar reward for a full trajectory. Given a dataset \\(\\mathcal{D}\\) of input prompt \\(x\\) and sampled outputs \\(y\\) with corresponding correctness label \\(c\\in \\{0,1\\}\\), the goal of outcome reward modeling is to train the outcome reward model \\(r_{\\theta}\\) using the loss" + }, + { + "type": "equation", + "bbox": [ + 0.256, + 0.872, + 0.882, + 0.89 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {o r m}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ c \\log \\sigma (r _ {\\theta} (x, y)) + (1 - c) \\log (1 - \\sigma (r _ {\\theta} (x, y))) ], \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.894, + 0.884, + 0.926 + ], + "angle": 0, + "content": "where \\(\\sigma\\) is the sigmoid function. Alternatively, one can train ORMs with a pairwise formulation. Here, the correctness labels are not explicitly encoded in the loss function, but are used to categorize multiple sampled" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.15 + ], + "angle": 0, + "content": "outputs as correct or incorrect. From there, we can form pairs of outputs \\(\\{y_w, y_l\\}\\), where \\(y_w\\) reaches the correct outcome (e.g., correct answer for a math problem) and \\(y_l\\) reaches an incorrect outcome. The reward model \\(r_\\theta\\) is then typically trained with the Bradley-Terry loss, similar to that in DPO training (Equation 9)." 
+ }, + { + "type": "equation", + "bbox": [ + 0.307, + 0.163, + 0.884, + 0.189 + ], + "angle": 0, + "content": "\\[\nL _ {\\text {o r m}} (\\theta) = - \\mathbb {E} _ {x, y _ {w}, y _ {l} \\sim D} \\left[ \\log \\left(\\sigma \\left(r _ {\\theta} (x, y _ {w}) - r _ {\\theta} (x, y _ {l})\\right)\\right) \\right], \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.199, + 0.884, + 0.245 + ], + "angle": 0, + "content": "Many other pairwise loss functions can be employed, such as hinge loss or other margin-based losses, focal loss, or variations of the Bradley-Terry loss. However, recent work (Liu et al., 2024a) has categorized the impact of loss functions, finding that the typical Bradley-Terry loss yields the best-performing ORM." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.26, + 0.411, + 0.277 + ], + "angle": 0, + "content": "4.2.2 Process Reward Models (PRM)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.287, + 0.884, + 0.378 + ], + "angle": 0, + "content": "While outcome reward models are relatively simple to train, outcome-driven verification may encourage incorrect reasoning chains that lead to the correct outcome. As such, recent work has sought to train process reward models (PRMs) to assess correctness for each step in the solution. This requires more fine-grained labels than ORM training. Specifically, assume that for an output \\( y = (a_{1},\\dots ,a_{T}) \\), we obtain process-level supervision of the form \\( c_{1},\\ldots ,c_{T} \\), where \\( c_{t} \\) is a binary indicator of step \\( a_{t} \\) correctness. Then, the step-wise cross-entropy loss below is applied." 
+ },
+ {
+ "type": "equation",
+ "bbox": [
+ 0.207,
+ 0.391,
+ 0.884,
+ 0.433
+ ],
+ "angle": 0,
+ "content": "\\[\nL _ {p r m} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ - \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(c _ {t} \\log \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right) + \\left(1 - c _ {t}\\right) \\log \\left(1 - \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right)\\right)\\right) \\right] \\tag {12}\n\\]"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.442,
+ 0.885,
+ 0.625
+ ],
+ "angle": 0,
+ "content": "Above, \\( y_{\\leq t} \\) denotes the output prefix up to and including step \\( t \\). In practice, collecting step-level annotations \\( c_t \\) can be extremely expensive. As a result, recent work has used variants of Monte Carlo Tree Search to automatically obtain said annotations. Specifically, the annotation for a reasoning step is obtained by rolling out the response until completion from the intermediate step, then using the outcome accuracy as a proxy for correctness (Wang et al., 2024g; Jiao et al., 2024a; Wang et al., 2024k; Dou et al., 2024a; Luo et al., 2024b; Setlur et al., 2024b). As a concrete example, suppose we roll out five completions randomly from the same prefix \\( y_{\\leq t} \\), with three rollouts arriving at the correct answer. Then, the confidence that the prefix \\( y_{\\leq t} \\) is correct can be approximated as 0.6. These coarse signals can then be used to train a PRM. These two general approaches to constructing PRM training data have associated pros and cons: Collecting human annotations is expensive, but does not overfit PRM training to one particular policy. MCTS-based approaches yield annotations relatively quickly, but do not generalize beyond the policy from which samples are collected (Zheng et al., 2024; Setlur et al., 2024a)."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.113,
+ 0.64,
+ 0.321,
+ 0.655
+ ],
+ "angle": 0,
+ "content": "4.2.3 Generative Verifiers"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.667,
+ 0.884,
+ 0.757
+ ],
+ "angle": 0,
+ "content": "ORMs and PRMs are discriminative verifiers, and are therefore unable to generate natural language to support their scores. However, natural language reasoning for evaluations is valuable both as actionable feedback and as an explainable mechanism. As a result, generative verifiers have been proposed to assess responses and provide natural language feedback. Generative verifiers have progressed from prompting frontier LLMs to evaluation-specific finetuning, relying on many of the same learning algorithms presented in Section 4.1. As such, the focus of this section is largely on training data curation."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.774,
+ 0.885,
+ 0.926
+ ],
+ "angle": 0,
+ "content": "Finetuned generative verifiers Generative verifiers are broadly classified as critique models or LLM-as-judge models. Critique models typically take as input a question and model response, and produce a critique with actionable feedback in natural language. The foundation of critique model training is critique training data. To construct training data, intentionally incorrect outputs are sampled from a policy model. Then, these outputs are corrected, usually with stronger model or human annotations. Using such samples, past methods (Wang et al., 2023c; Xi et al., 2024) have employed SFT (Section 4.1.1) to train critique models to imitate critiques. Other methods (Yao et al., 2023c; McAleese et al., 2024) have used the typical RLHF workflow (Section 4.1.3), first training a reward model to use during PPO training. 
More recently, outcome-based RL (e.g., GRPO, as presented in Section 4.1.2) has been used for training, relying on either hand-crafted rewards (Akyurek et al., 2023) or execution feedback for code critique (Xie et al., 2025)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.242 + ], + "angle": 0, + "content": "LLM-as-judge models are a more general class of generative verifiers trained to evaluate model responses based on different protocols (pairwise evaluation, 1-5 rating, binary classification). These models rely on preference datasets, either annotated by a strong model or by humans. For example, to train a pairwise LLM-as-judge, one would collect a dataset of paired model responses for a given input prompt, then ask either a human or strong LLM to pick which response is better. Then, natural language explanations are distilled from stronger models, with distilled samples being categorized as correct or incorrect if the preference matches the annotation. From here, earlier LLM-as-judges (e.g., (Li et al., 2023b; Zheng et al., 2023a)) trained with SFT (Section 4.1.1), while newer approaches (Wang et al., 2024f; Hu et al., 2024) have used DPO (Section 4.1.3)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.256, + 0.885, + 0.348 + ], + "angle": 0, + "content": "Discriminative-generative hybrid verifiers Because generation is a more difficult task than classification, generative verifiers have often lagged discriminative reward models in benchmark performance. Recent work (Zhang et al., 2024f; Mahan et al., 2024) has sought to unify the two under the Generative Reward Model umbrella. Here, models use similar datasets to those used to train LLM-as-judge models, but augment the SFT loss with an answer-token loss. 
Concretely, given a dataset \\(\\mathcal{D}\\) with samples comprised of an input \\(x\\), model response \\(y\\), and outcome label \\(c\\) (e.g., \"Yes\"/\"No\" for correctness), the loss"
+ },
+ {
+ "type": "equation",
+ "bbox": [
+ 0.358,
+ 0.361,
+ 0.884,
+ 0.377
+ ],
+ "angle": 0,
+ "content": "\\[\nL _ {G e n R M} (\\theta) = - \\mathbb {E} _ {x, y, c \\sim \\mathcal {D}} \\left[ \\log \\pi_ {\\theta} (c | x, y) \\right] \\tag {13}\n\\]"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.388,
+ 0.884,
+ 0.419
+ ],
+ "angle": 0,
+ "content": "is added to the typical language generation losses (e.g., SFT or DPO loss) that are used to train the model to produce natural language explanations. Here, \\(\\pi_{\\theta}\\) is the generative reward model being trained."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.113,
+ 0.439,
+ 0.332,
+ 0.456
+ ],
+ "angle": 0,
+ "content": "5 Learning to Reason"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.472,
+ 0.884,
+ 0.593
+ ],
+ "angle": 0,
+ "content": "In Section 3, we explored various methods for enhancing reasoning through inference-time computation. While these approaches have proven effective in many scenarios, they come with notable limitations, such as constrained improvements in reasoning capabilities (since model parameters remain unchanged) and the requirement for substantial computational resources during inference. With the advent of OpenAI o1 (OpenAI et al., 2024), there has been a growing emphasis on improving reasoning through training-time methods. Recently, Deepseek-R1 (DeepSeek-AI et al., 2025) demonstrated that training-time approaches can achieve reasoning improvements comparable to, or even surpassing, those of inference-scaling methods. Reflecting this trend, this section delves deeper into the role of training in advancing reasoning capabilities."
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.6, + 0.884, + 0.722 + ], + "angle": 0, + "content": "Specifically, we explore the data recipe, which focuses on constructing data (reasoning trajectories) tailored for reasoning tasks to facilitate training. At a high level, trajectory collection can be viewed as a form of simulation, where the generator produces reasoning steps—potentially incorporating calls and outputs from external tools—in response to either synthetic or real-world inputs. The primary challenge lies in ensuring that this simulation is both realistic and diverse while simultaneously providing meaningful supervision (reward) throughout the process. Depending on the architecture, as outlined in Section 2.3, this typically involves designing inputs (such as perception in single-agent systems or interaction in multi-agent systems) and outputs (such as actions in single-agent systems or coordination in multi-agent systems)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.729, + 0.884, + 0.835 + ], + "angle": 0, + "content": "Furthermore, we explore the model recipe. Depending on the learning algorithms (Section 4), the model recipe can be 'offline' (non-RL, e.g., SFT and offline RL, e.g. DPO), which focuses on extracting supervision (reward) from the collected trajectories and leveraging them for training. It can also be 'online' (most of RL algorithms, e.g., GRPO and PPO), where there is no need to collect trajectories beforehand, but learning occurs directly on the questions and their rewards. Similar to Section 3, we start with standalone LLMs, detailing how each of their components is trained (Section 5.1). Building on this foundation, we expand the discussion to single-agent systems (Section 5.2) and multi-agent systems (Section 5.3)." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.852, + 0.472, + 0.868 + ], + "angle": 0, + "content": "5.1 Learning to Reason with Standalone LLM" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.88, + 0.884, + 0.926 + ], + "angle": 0, + "content": "This section examines how standalone LLMs can be trained for reasoning tasks. For 'offline' methods, the process typically involves collecting reasoning trajectories, that lead to both correct and incorrect outcomes, followed by further training the LLM on these trajectories. In contrast, for 'online' methods, learning occurs" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.102, + 0.88, + 0.212 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsQuestion AugmentationExpand knowledge depth and breadth of seed questionsLuo et al. (2023b); Yu et al. (2024c)
Graph-based SynthesisSynthesize prompts guided by structured taxonomyLi et al. (2024a); Tang et al. (2024)
Collecting TrajectoriesRejection SamplingFilter low-quality trajectories from current policyDong et al. (2023)
Special Reasoning PatternImitate human-like reasoning behaviorYuan et al. (2024a); Qin et al. (2024)
Reasoning DistillationDistill reasoning capability from frontier reasoning modelHuang et al. (2024d)
Training from TrajectoriesImitation LearningLearn the behavior directly from the collected trajectoriesYu et al. (2024c)
Preference LearningOptimize preference between pos. and neg. trajectoriesJiao et al. (2024a)
Latent ReasoningCompress trajectory length using implicit reasoning tokensHao et al. (2024b)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.271, + 0.221, + 0.725, + 0.236 + ], + "angle": 0, + "content": "Table 6: Summary of learning to reason with standalone LLM." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.267, + 0.884, + 0.343 + ], + "angle": 0, + "content": "directly based on the sampled reasoning chains and their corresponding rewards. While much of the research focus has been on sampling high-quality outputs (i.e., trajectories), methods for generating a robust and diverse set of problems, or model inputs, have also garnered attention. We begin by detailing the process of collecting trajectories, which includes constructing inputs (Section 5.1.1) and obtaining outputs (Section 5.1.2). Subsequently, we describe how the LLM can be trained using the collected trajectories (Section 5.1.3)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.361, + 0.543, + 0.379 + ], + "angle": 0, + "content": "5.1.1 Constructing High-quality Prompts for Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.388, + 0.884, + 0.48 + ], + "angle": 0, + "content": "To effectively drive knowledge distillation and model-seeking, we must curate a diverse collection of high-quality prompts that comprehensively span the target knowledge space. Relying on a narrow or homogeneous prompt set—even when sourced from a strong base model—limits exploration and undermines both distillation and reinforcement learning processes. By contrast, carefully crafted prompts expand the model's exploratory capacity, yielding richer representations and more robust downstream performance. As such, this section covers methods for collecting or synthesizing more challenging prompts." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.498, + 0.884, + 0.651 + ], + "angle": 0, + "content": "Question augmentation A straightforward approach to generating additional inputs is to directly augment existing datasets using frontier LLMs. For example, Xu et al. 
(2024a) propose using LLMs to \"evolve\" existing prompt sets, expanding their depth (e.g., more complex instructions) and breadth (e.g., rarer concepts). Yu et al. (2024c) have proposed two main approaches to augment existing questions. One is simply rewriting using frontier LLMs, and the other one is self-verification, which transforms a condition in the question into an unknown variable, shows the original answer, and proposes a new question by querying the value of the unknown variable. Luo et al. (2023b) adopt a comparable strategy, employing a question generator to iteratively produce both harder and easier versions of a given question, as inspired by the instruction evolution approach of Xu et al. (2024a). The synthesized instructions are further refined using a reward model to ensure quality."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.111,
+ 0.668,
+ 0.885,
+ 0.806
+ ],
+ "angle": 0,
+ "content": "Knowledge graph-based synthesis Directly augmenting prompts with LLMs can increase the size of the training set but does not inherently enhance diversity. To address this, knowledge graphs—structured taxonomies for organizing reasoning domains—have been utilized to construct input prompts with broader coverage. For instance, Li et al. (2024a) employ a frontier LLM to generate a knowledge graph directly, while Tang et al. (2024) task a frontier LLM with extracting a taxonomy from a seed dataset. These knowledge graphs are then used to progressively synthesize challenging questions, which are subsequently used to prompt larger teacher LLMs, resulting in high-quality instruction-tuning datasets with wider knowledge coverage. Additionally, Jiao et al. (2024b) leverage relation graphs derived from web documents to synthesize pretraining data, improving relation-based logical reasoning capabilities."
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.822, + 0.519, + 0.839 + ], + "angle": 0, + "content": "5.1.2 Collecting High-quality Reasoning Trajectories" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.85, + 0.884, + 0.926 + ], + "angle": 0, + "content": "Beyond constructing high-quality prompts, researchers also refine outputs to collect better trajectories for training. These techniques often sample outputs that follow specific reasoning patterns, such as lengthy reasoning processes with self-reflection, and retain those that meet higher quality standards based on ground-truth labels. Consistent with our architecture definitions in Sec. 2.3, we treat the learned verifier as part of the environment in the agentic system. Consequently, this section focuses exclusively on methods that" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.104, + 0.884, + 0.135 + ], + "angle": 0, + "content": "utilize existing ground-truth labels—such as answer labels in maths or test cases for code generation—while deferring discussion of methodologies that rely on learned verifiers (reward models or LLM-judges) to Sec. 5.2." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.15, + 0.885, + 0.302 + ], + "angle": 0, + "content": "Rejection sampling Rejection sampling (Dong et al., 2023) aims to select higher-quality samples by repeatedly sampling from the policy model (reasoner). Quality is determined through two primary sources: (1) a learned verifier, which we discuss in Section 5.2, and (2) direct comparison with ground-truth labels (when available), where samples inconsistent with the ground-truth labels are discarded. Yuan et al. (2023) apply this idea to mathematical reasoning, introducing edit distance to ensure diversity among trajectories. Zelikman et al. 
(2022) propose STaR to incorporate the correct answer into the instruction, prompting LLMs to iteratively refine incorrect reasoning traces and generate higher-quality trajectories. Tong et al. (2024) employ an up-sampling strategy to increase the proportion of successful trajectories for more challenging questions. This approach has become a standard technique for iterative model self-improvement, as demonstrated in works such as (Jiao et al., 2025; Guan et al., 2025; Dou et al., 2024b)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.317, + 0.884, + 0.438 + ], + "angle": 0, + "content": "Encourage special reasoning pattern Another line of research focuses on leveraging human-like reasoning behaviors—such as self-reflection, deep reasoning, and thinking-before-action—to improve reasoning accuracy and reduce hallucinations. One notable approach is Reasoning-as-Planning (RAP) (Hao et al., 2023), which divides reasoning into three steps: thinking, taking action, and observing (inferring) changes in the environment. When applied to text-based reasoning problems, LLMs simulate environment states after taking actions, leading to more accurate reasoning. Building on this idea, Yuan et al. (2024a) and Chen et al. (2023a) use frontier LLMs like GPT-3.5 and GPT-4 to synthesize trajectories with this pattern for reasoning problems, facilitating imitation learning." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.445, + 0.884, + 0.506 + ], + "angle": 0, + "content": "Besides, inspired by the success of long and deep reasoning revealed by OpenAI's o1 model, which incorporate self-reflection and search, some researchers propose imitating this process through rule-based synthesis. For instance, Qin et al. (2024) flatten MCTS trajectories, including failed branches, and ask general models to generate bridge sentences for natural transition from the failed nodes to the ones along the successful paths." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.521, + 0.884, + 0.612 + ], + "angle": 0, + "content": "Reasoning distillation Several studies distill reasoning patterns from models capable of producing good reasoning chains (e.g., OpenAI o1) to replicate similar behaviors in smaller models. For example, Huang et al. (2024d), NovaSky Team (2025), Bespoke Labs (2025) and Muennighoff et al. (2025) distill reasoning chains from models like OpenAI-o1, Qwen-QWQ-32B, DeepSeek-R1, and Gemini Thinking Experimental, respectively. Min et al. (2024) diversify this approach by distilling from multiple reasoning models and aggregating outputs into a unified format." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.627, + 0.367, + 0.643 + ], + "angle": 0, + "content": "5.1.3 Training from Trajectories" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.652, + 0.883, + 0.683 + ], + "angle": 0, + "content": "Using the collected trajectories, training can be conducted by designing the input and output formats for the algorithms discussed in Section 4." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.698, + 0.884, + 0.865 + ], + "angle": 0, + "content": "Supervised Fine-Tuning (SFT) As discussed in Sec. 4.1.1, the most straightforward approach to training reasoning-capable LLMs is to fine-tune a model using SFT on collected trajectories. Methods such as (NovaSky Team, 2025; Bespoke Labs, 2025; Huang et al., 2024d) and (Min et al., 2024) utilize SFT with a modest number of data samples (4K-20K) to replicate the reasoning capabilities of OpenAI's o1 model. Recent SFT approaches have shifted focus to data scaling, with Xu et al. (2025e) exploring the impact of increasing data quantity up to 1 million CoT samples. Their findings demonstrate that performance improves with data scale, albeit with diminishing returns. In contrast, Muenighoff et al. (2025) adopt a sample-efficient approach, curating a high-quality 1K-sample reasoning dataset for fine-tuning. 
They show that this smaller dataset, combined with strategic inference-time prompting, achieves performance comparable to models trained on larger datasets. Similar strategies have been applied in domain-specific reasoning models, such as earlier math reasoning systems Yu et al. (2023a); Yue et al. (2023)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.88, + 0.884, + 0.926 + ], + "angle": 0, + "content": "Preference learning and reinforcement learning While SFT approaches have shown effectiveness, other studies demonstrate that preference learning further enhances performance. Min et al. (2024) study DPO, while Xu et al. (2025e) explore various post-training preference learning methods. Hui et al. (2024)," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.242 + ], + "angle": 0, + "content": "Min et al. (2024), and Jiao et al. (2024a) all employ DPO with preference pairs derived from code test cases, outcome correctness, and a PRM trained on automatic supervision, respectively. Another line of work focuses on step-level DPO to optimize reasoning action selection. Specifically, Zhang et al. (2024h) use Tree-of-Thought (Yao et al., 2023a) to estimate outcome rewards and backpropagate them to intermediate nodes for quality assessment. Step-level DPO is then applied to pairs sharing the same trajectory prefix but with contrasting next actions. Lai et al. (2024) directly use GPT-4o to identify the earliest incorrect reasoning step and construct contrastive step-level DPO pairs for preference learning. Yuan et al. (2024d) adopt an iterative DPO approach in a self-rewarding setting, where the policy model itself acts as an LLM-as-judge to progressively improve its capabilities." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.248, + 0.885, + 0.354 + ], + "angle": 0, + "content": "In addition to preference learning, RL with verifiable answer labels also demonstrate importance in improving reasoning, where rule-based rewards by checking the correctness of sampled solutions are employed rather than reward models.6 Lambert et al. (2024) use both math reasoning and instruction following data for outcome-based reinforcement learning without reward models. Deepseek-R1 (DeepSeek-AI et al., 2025) further reveal the potential of pure reinforcement learning with verifiable answers. Yu et al. (2025) provide valuable reproduction of Deepseek-R1 on Qwen2.5-32B, including open-sourced data, code, and technical details about loss function design, reward shaping, and dynamic sampling." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.368, + 0.884, + 0.475 + ], + "angle": 0, + "content": "Training with latent reasoning Typical reasoning models generate long reasoning chains and have demonstrated strong empirical performance. However, this comes at the cost of increased inference time, as they produce lengthy natural language reasoning traces. These traces often contain many tokens that improve the flow and coherence of the output, with only a small fraction directly contributing to the reasoning process. To address this inefficiency, an alternative approach, known as latent reasoning, focuses on representing reasoning trajectories implicitly. This is achieved either by omitting intermediate reasoning tokens entirely or by compressing them into specialized reasoning tokens or continuous vector representations." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.481, + 0.884, + 0.572 + ], + "angle": 0, + "content": "Earlier work in continuous reasoning focused on compressing natural language reasoning chains into a smaller number of tokens. Deng et al. 
(2023b) employ knowledge distillation to encode the knowledge from natural language reasoning tokens into intermediate representations of the student model. During inference, the model generates only the final answer without producing additional rationale. This approach is further refined through curriculum learning (Deng et al., 2024b), which gradually removes reasoning tokens during training to reduce distribution mismatch." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.58, + 0.882, + 0.807 + ], + "angle": 0, + "content": "However, removing all explicit intermediate reasoning tokens may compromise the model's expressivity (i.e., ability to articulate complex reasoning) (Prystawski et al., 2023). A natural trade-off is to retain a limited number of reasoning tokens, making them implicit to enhance expressiveness while preserving performance. Goyal et al. (2024) introduce learnable tokens during pre-training and fine-tuning within standard CoT trajectories, enabling the model to perform additional computation before generating an output token. Wang et al. (2023d) explore various techniques for compressing reasoning steps from training trajectories into a fixed set of planning tokens. At the start of each reasoning step, the model generates a planning token, whose encoded \"knowledge\" guides the generation of more coherent outputs. Hao et al. (2024b) propose using the last-layer hidden states before the language modeling head as implicit reasoning token representations, feeding these back into the model to generate the next token auto-regressively. These implicit representations are optimized in a stage-wise manner, akin to the approach of Deng et al. (2024b). Xu et al. (2025f) propose an approach for continuous-space reasoning that does not require modifying the LLM reasoner. 
Specifically, they employ a lightweight fixed assistant model to generate instance-specific soft thought tokens speculatively as the initial chain of thoughts, which are then mapped into the LLM's representation space via a trainable projection module." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.822, + 0.506, + 0.839 + ], + "angle": 0, + "content": "5.2 Learning to Reason with Single-agent Systems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.85, + 0.884, + 0.881 + ], + "angle": 0, + "content": "As discussed in Section 2.3, agentic systems enhance the reasoning capabilities of standalone LLMs by incorporating agent-environment interactions. These interactions enable the agent to perceive its environment" + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.887, + 0.884, + 0.925 + ], + "angle": 0, + "content": "6We treat the work using reward model/tool-based verifier for RL in the scope of single-agent systems (see Sec. 5.2) 7As discussed in Section 4.2, in outcome-based RL, the reward is assigned to the entire trajectory. This contrasts with process-based RL, which assigns a reward at each step." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.102, + 0.88, + 0.194 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
Action-Environment InteractionsIncorporating FeedbackUse environment feedback to filter trajectoriesNi et al. (2024); Xin et al. (2024b)
Training External ModelsTrain models (e.g., to critic) from the interactionWu et al. (2024c)
Search with VerifiersUse verifiers to identify better reasoning trajectoriesWan et al. (2024c)
Distillation from TeacherDistill capability from frontier reasoning modelGou et al. (2024); Ma et al. (2024a)
Training from TrajectoriesSupervised Fine-TuningCollected offline trajectories + learn via SFTDou et al. (2024b); Yin et al. (2024)
Reinforcement LearningLearning directly on questions and their rewardsShao et al. (2024)
Learning with RefinerTrain refiner model to iteratively improve the last-round solution.Xiong et al. (2025)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.257, + 0.204, + 0.738, + 0.219 + ], + "angle": 0, + "content": "Table 7: Summary of learning to reason with single-agent systems." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.248, + 0.884, + 0.293 + ], + "angle": 0, + "content": "and accordingly perform actions. This section explores how simulation is achieved through the design of such perceptions and agent actions. It then covers training methods—how agents are trained using these trajectories. Additionally, we discuss how predefined patterns are leveraged when collecting trajectories." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.31, + 0.637, + 0.326 + ], + "angle": 0, + "content": "5.2.1 Trajectory Collection through Agent-Environment Interactions" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.336, + 0.884, + 0.398 + ], + "angle": 0, + "content": "By interacting with the external world in different ways, agents can effectively construct trajectories that help refine their reasoning process. These interactions to enrich reasoning take the form of (a) incorporating execution feedback, (b) training external models to help reasoning, (c) search with verifiers, and (d) trajectory distillation from stronger teacher agents." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.414, + 0.884, + 0.581 + ], + "angle": 0, + "content": "Incorporating execution feedback Through active interaction with the environment, the agent can obtain valuable feedback for trajectory filtering. Building on STaR (Zelikman et al., 2022) (discussed in Sec. 5.1.2), NExT (Ni et al., 2024) leverages unit tests (Ye et al., 2022) to obtain self-generated rationales that lead to correct solutions for training. 
AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) solve formal theorem-proving problems by generating potential solutions and validating them through interaction with the Lean proof assistant (De Moura et al., 2015), either proving or disproving the solutions. Xin et al. (2024b) further improve DeepSeek-Prover by introducing RMaxTS, an exploration strategy driven by intrinsic rewards to generate diverse proof paths. Furthermore, the agent can integrate environmental information directly into the training process to improve its reasoning capabilities. For example, Cummins et al. (2023) train a 7B model from scratch, achieving significantly improved code optimization performance by leveraging optimizing transformations from external LLVM compilers." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.598, + 0.884, + 0.675 + ], + "angle": 0, + "content": "Training external models The agent can leverage its interaction with the environment to train external models that can in turn help the agent's reasoning. For example, Wu et al. (2024c) train a critic model to identify relatively easier problems for the policy to explore and guide the policy in searching for deeper proof paths. Re-ReST (Dou et al., 2024b) proposes training a refiner to correct the agent's wrong output based on environmental feedback." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.691, + 0.884, + 0.782 + ], + "angle": 0, + "content": "Reasoning search with verifiers Search-based methods address sampling challenges for more difficult problems by leveraging external reward models or generation probabilities to guide decoding. For example, Wan et al. (2024c) develop a Monte Carlo Tree Search (MCTS)-based approach to identify better reasoning trajectories. Each tree node represents either a sentence or token, and a learned LLM-based value function and outcome reward model are used to estimate expected returns during the search process. 
This method can be applied for both inference-time path selection and training-time imitation learning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.789, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Guan et al. (2025) rely solely on outcome labels to iteratively update the policy model and a process preference model (PPM) through MCTS. The PPM approximates the Q-value of intermediate reasoning steps. Lai et al. (2024) use an LLM-as-judge to identify the first reasoning step in a sampled trajectory that contains an error. The trajectory up to the error is then used to sample new outputs, and DPO preference pairs are formed from correct and incorrect outputs. Zhang et al. (2024h) focus on unsupervised settings where answer labels are unavailable. Discarded steps collected during the search process are treated as negative actions, contrasting with the steps retained in the final path for DPO training. For multi-step reasoning in dynamic environments, such as web navigation, Putta et al. (2024) propose combining guided MCTS with self-critique to facilitate more effective exploration." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.332 + ], + "angle": 0, + "content": "Trajectory distillation from stronger teacher agents To tackle challenging mathematical problems, Gou et al. (2024) curate interactive tool-use (e.g., code execution) trajectories using GPT-4, derived from existing mathematical datasets across various domains. Similarly, MuMath-Code (Yin et al., 2024) employs multi-perspective data augmentation to generate diverse math questions and synthesizes code-nested solutions using GPT-4. Beyond mathematics, other domains have also been explored. For instance, Ma et al. (2024a) construct a tool-augmented training set for scientific reasoning by prompting GPT-4. 
CoGEX (Weir et al., 2024) extends LLMs' program synthesis capabilities to tasks that are not easily expressible as code, such as commonsense reasoning and sarcasm understanding. To collect training trajectories, GPT-4 is used to transform the Alpaca dataset (Taori et al., 2023) into the required format. Ke et al. (2025b) explore collecting trajectories from a more capable generative reward model (GPT-4o) to train a finance-expert model by identifying and correcting the first erroneous step in the reasoning process. Additionally, AgentBank (Song et al., 2024) introduces the largest dataset of agent-environment interaction trajectories, comprising 16 tasks across 5 distinct agent skill dimensions. This dataset is created by annotating actions and their corresponding rationales using LLMs of varying scales, addressing key challenges in trajectory collection, such as scalability." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.338, + 0.884, + 0.385 + ], + "angle": 0, + "content": "In addition to leveraging trajectories from GPT-4, Gou et al. (2024) introduce output space shaping by incorporating samples generated by the agent itself. Specifically, they train the agent on both self-sampled correct trajectories and those corrected by a teacher model, promoting diversity in plausible reasoning steps." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.399, + 0.419, + 0.415 + ], + "angle": 0, + "content": "5.2.2 Agent Training from Trajectories" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.426, + 0.884, + 0.651 + ], + "angle": 0, + "content": "Supervised Fine-Tuning (SFT) After collecting trajectories, many methods apply supervised fine-tuning (SFT) to train the agent, enabling models with little prior experience in agentic environments to adapt quickly. Dou et al. (2024b) enhances agent reasoning by incorporating refiner-corrected samples into the self-training process. 
NExT (Ni et al., 2024) uses filtered trajectories to train agents for program repair tasks, while Weir et al. (2024) fine-tune agents on collected trajectories to enable the generation and emulation of pseudo-programs. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) iteratively train and refine the policy model using verified proofs, improving performance in theorem proving tasks. Similarly, Gou et al. (2024), Yin et al. (2024), Ma et al. (2024a), and Song et al. (2024) fine-tune agents on agent-environment interaction trajectories generated by proprietary LLMs, enhancing reasoning capabilities across diverse domains. Notably, MuMath-Code (Yin et al., 2024) adopts a two-stage training strategy, first fine-tuning on pure CoT data and then on code-nested data. Chen et al. (2024e) introduce Agent-FLAN, a fine-tuning method designed to improve LLMs' agent capabilities while addressing challenges such as distribution shifts and hallucinations in training data. By redesigning the training corpus and incorporating negative samples, Agent-FLAN enhances both agent-specific and general capabilities of LLMs." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.668, + 0.882, + 0.925 + ], + "angle": 0, + "content": "Reinforcement Learning (RL) Beyond imitation learning through SFT, recent approaches have leveraged reinforcement learning to further enhance reasoning capabilities. Notably, GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025), which employs verifiable outcome rewards during online RL training, has demonstrated strong empirical performance. Havrilla et al. (2024) investigate multiple RL algorithms (e.g., Expert Iteration, PPO) for math reasoning tasks, finding that incorporating outcome reward models has negligible effects on performance for both Expert Iteration and PPO. Similarly, Shao et al. (2024) observe relatively minor performance gains when using PRMs during GRPO training. Yang et al. 
(2024b) explore using a PRM to \"shape\" outcome rewards by using a linear combination of outcome and PRM rewards for GRPO training. In contrast, Wang et al. (2024g); Luo et al. (2023a); Jiao et al. (2024a) demonstrate that using a trained PRM during PPO training leads to significant performance improvements. Similar gains are observed in the code generation domain (Dai et al., 2024), where the PRM serves both as a reward signal and as an initial checkpoint for the value function during PPO. Zhang et al. (2024a) iteratively train both a PRM and LLM, while Setlur et al. (2024b) provide a new perspective by comparing Q-value-based PRMs with advantage function-based ones, showing improved learning efficiency and performance in guided reinforcement learning. Concurrently, Gao et al. (2024a) address reward hacking (Casper et al., 2023)—where the policy model generates numerous correct but irrelevant reasoning steps to inflate rewards—by implementing clipping and computing relative, step-adjacent rewards." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.118, + 0.102, + 0.88, + 0.196 + ], + "angle": 0, + "content": "
PerspectiveMethodCharacteristicRepresentative Work
Designing CommunicationCentralized communicationUse a centralized controller for information aggregationCanese et al. (2021); Matta et al. (2019)
Conditioned information sharingShare information based on relevancy and privacyHong et al. (2023); Qiu et al. (2024)
Coordinating ActionsLeverage knowledgeUtilize expert knowledge as constraintsLau et al. (2012)
Graph-based methodsUse graphs as structured frameworksRuan et al. (2022); Li et al. (2020)
Hierarchical approachDivide policies to strategy and executionXu et al. (2023)
Training from TrajectoriesTraining data from interactionsObtain high-quality trajectories from interactionsLi et al. (2024c); Estornell et al. (2024)
Gradient modificationModify gradients towards optimal pointsLi et al. (2024f)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.265, + 0.206, + 0.731, + 0.221 + ], + "angle": 0, + "content": "Table 8: Summary of learning to reason for multi-agent systems." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.25, + 0.884, + 0.327 + ], + "angle": 0, + "content": "Qiao et al. (2023a) introduce TRICE, a two-stage framework that enables agents to determine when and how to use tools through Reinforcement Learning with Execution Feedback (RLEF) from external tools. Similarly, Xin et al. (2024b) enhance DeepSeek-Prover by incorporating reinforcement learning from proof assistant feedback (RLPAF). To effectively learn from both successful and unsuccessful agent-environment interactions, Putta et al. (2024) develop an off-policy variant of DPO for iterative training." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.343, + 0.884, + 0.42 + ], + "angle": 0, + "content": "Learning with refiner For more challenging questions, models may fail to generate enough successful trajectories to serve as a reliable positive training signal. However, even trajectories with incorrect outcomes can still be leveraged effectively. For example, Qu et al. (2024a) train a correction model using RL to iteratively refine generated model responses. Similarly, Tang et al. (2025) propose a self-evolving framework to train a critique model, which enhances the quality of outputs through continuous feedback." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.427, + 0.884, + 0.653 + ], + "angle": 0, + "content": "Refiner models can also be integrated into the search process to iteratively improve generation quality. For instance, Snell et al. (2024) train a refiner model via RL (Qu et al., 2024b) to refine outputs sequentially. The final prediction is obtained through majority voting over all predictions generated during this iterative refinement process, effectively scaling test-time computation. Xi et al. 
(2024) develop a step-level critique model that provides feedback for each reasoning step, using training instances collected from GPT-4o. This feedback serves two purposes: (1) expanding training data to improve the actor model, and (2) scaling test-time computation through iterative self-refinement in a multi-agent setup. Zhang et al. (2024b) combine reasoning and self-refinement into a single MCTS framework, where each node is either a reasoning node (generating complete reasoning trajectories) or a refining node (identifying and correcting reasoning flaws). A learned pairwise reward model compares the quality of refined and original outputs, estimating the expected returns of each node. However, this work does not explicitly account for the inference setting, where neither the reasoner nor the refiner has access to the correctness of the sampled response. This can lead to refiners inadvertently degrading originally correct solutions. To address this issue, Xiong et al. (2025) introduce a learnable self-rewarding mechanism. This approach mitigates the risk of worsening correct solutions and alleviates the distribution-shifting problem in self-correction (Kumar et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.672, + 0.496, + 0.687 + ], + "angle": 0, + "content": "5.3 Learning to Reason with Multi-agent System" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.7, + 0.885, + 0.807 + ], + "angle": 0, + "content": "In Section 2.3, we discussed how multi-agent systems extend single-agent systems through agent-agent communication. This enables agents to assume distinct roles, exchange messages, and coordinate their actions before interacting with the environment. In this section, we explore how trajectory collection can be achieved through the careful design of agent-agent communication and the coordination of actions across different agents. 
As a system level, communication serves as the input or perception mechanism for participating agents, focusing on the protocols governing message exchange. Meanwhile, actions represent the output of the system, addressing how consensus is reached given the diverse actions proposed by individual agents." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.823, + 0.472, + 0.839 + ], + "angle": 0, + "content": "5.3.1 Designing Agent-Agent Communication" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.85, + 0.884, + 0.926 + ], + "angle": 0, + "content": "In a multi-agent framework, ensuring that each agent is aware of the actions of others is critical, as a well-designed communication system can significantly enhance collective intelligence (Guo et al., 2024b). One effective solution is the use of a centralized controller (Canese et al., 2021). For example, Matta et al. (2019) propose a centralized aggregation center that constructs a global swarm matrix by aggregating the Q-value tables of all agents. Similarly, the MARCO framework (Zhang et al., 2021) employs centralized training with" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.15 + ], + "angle": 0, + "content": "decentralized execution to improve sample efficiency in partially observable multi-agent environments. By learning a shared model that generalizes across agents' policies and directing exploration toward uncertain areas, MARCO optimizes reasoning and resource utilization in cooperative tasks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.157, + 0.885, + 0.263 + ], + "angle": 0, + "content": "To enable effective communication among agents, Sukhbaatar et al. (2016) introduce a neural communication model with a learned protocol tailored to the task. 
Additionally, a shared message pool (Hong et al., 2023) can be implemented, where agents send messages and subscribe to relevant ones based on their individual profiles. In recent work by Qiu et al. (2024), each agent maintains a private intention, which includes its current goal and associated sub-tasks. These intentions are broadcast periodically, and a propagation network converts them into teammate-specific communication messages, ensuring that relevant goals are shared with the appropriate teammates." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.28, + 0.513, + 0.296 + ], + "angle": 0, + "content": "5.3.2 Coordinating Actions among Multiple Agents" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.307, + 0.884, + 0.457 + ], + "angle": 0, + "content": "To enhance coordination among multiple agents, various approaches have been proposed, including leveraging expert knowledge, graph-based frameworks, and hierarchical structures to improve efficiency and effectiveness. For better coordination of actions across agents, Lau et al. (2012) utilize expert coordination knowledge as constraints to refine the exploration and learning process. By reducing the action space and focusing on promising states, this approach enhances decision-making. Additionally, graph-based methods have been explored to improve coordination. For instance, the Graph-based Coordination Strategy (GCS) (Ruan et al., 2022) introduces a framework that employs a directed acyclic graph to coordinate agent policies. This enables agents to synchronize their actions through predefined temporal sequences. Similarly, Deep Implicit Coordination Graphs (DICG) (Li et al., 2020) propose a graph neural network-based module to dynamically infer coordination structures for multi-agent reinforcement learning (MARL)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.465, + 0.884, + 0.511 + ], + "angle": 0, + "content": "Furthermore, hierarchical approaches have been developed to enhance synchronization. 
The Hierarchical Cooperative Multi-Agent Learning (HAVEN) framework (Xu et al., 2023) divides policies into two levels—strategy and execution—improving both inter-agent and inter-level coordination." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.527, + 0.466, + 0.543 + ], + "angle": 0, + "content": "5.3.3 Multi-Agent Training from Trajectories" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.554, + 0.884, + 0.78 + ], + "angle": 0, + "content": "Compared to single-agent scenarios, multi-agent training introduces additional challenges in higher coordination and communication complexity and recent approaches have leveraged different ways to address the challenge. DEBATUNE (Li et al., 2024c) employs a multi-round debate mechanism between two agents with opposing stances to generate training data. Through iterative debate, arguments are refined, resulting in high-quality and diverse outputs. During the training phase, models are fine-tuned using these debate-generated trajectories, enabling controllability and alignment with user-defined stances. Similarly, Subramaniam et al. (2025) fine-tune a society of agents, starting from the same base model, on independent data generated through multi-agent interactions. These agents specialize in distinct roles, such as \"generation\" and \"critic\" producing diverse reasoning trajectories. Training on such varied trajectories fosters specialization and mitigates performance plateaus. Acc-Debate (Estornell et al., 2024) utilizes an Actor-Critic framework to train a team of two agents collaboratively. One agent serves as the \"Actor\" generating responses, while the other acts as the \"Critic\" refining those responses. Training alternates between optimizing the Actor and Critic models, leveraging partial trajectory rewards which captures the expectation of reaching the correct answer at intermediate time stepsto address temporal dependencies in the debate process. 
This approach enhances collaboration and improves final performance." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.788, + 0.884, + 0.834 + ], + "angle": 0, + "content": "Furthermore, Li et al. (2024f) address the challenge of mixed-motive cooperation in multi-agent systems by modifying gradients to guide agents toward stable fixed points that balance individual and collective interests. This method enhances the ability to optimize trajectories for effective collaboration." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.852, + 0.524, + 0.868 + ], + "angle": 0, + "content": "5.4 Toward Cost-aware and Inference-aware Training" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.88, + 0.884, + 0.926 + ], + "angle": 0, + "content": "As reasoning models grow increasingly complex, ensuring both efficiency and effectiveness becomes crucial. Inference-time scaling and learning-to-reason approaches play complementary roles, as most inference-time scaling methods can be applied to models specifically trained for reasoning. However, both approaches come" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.212 + ], + "angle": 0, + "content": "with associated costs, whether it involves generating thousands of additional tokens compared to greedy decoding during inference or training models on large-scale trajectory datasets. Consequently, cost-aware methodologies, which factor in computational costs when deciding how to allocate resources during both training and inference, or those that address sample inefficiency, have gained recent attention. Similarly, inference-aware methodologies aim to enhance the time and cost efficiency of inference scaling by explicitly incorporating inference-time scaling strategies during training. In this section, we explore emerging cost-aware and inference-aware approaches." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.228, + 0.325, + 0.243 + ], + "angle": 0, + "content": "5.4.1 Cost-aware Training" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.255, + 0.885, + 0.421 + ], + "angle": 0, + "content": "Learning to reduce inference cost This line of research explores strategies to optimize the tradeoff between computational cost and reasoning performance by dynamically allocating resources based on input (prompt) complexity and desired output quality. For prompt analysis, Damani et al. (2025) use a learnable model to predict the difficulty of batched queries and dynamically allocate inference budgets accordingly. Building on this, Zhang et al. (2024d) train a model to predict the most efficient combination of inference strategies, directly optimizing for pass rates. Yue et al. (2025) decompose reasoning trajectories into specific behaviors and employ a trainable planner to derive question-specific compositions, identifying the optimal reasoning strategy—such as whether question decomposition or rewriting is necessary, whether Python programs are required, or if answer verification is needed. On the output side, Snell et al. (2025) propose a look-ahead search method, similar to step-level beam search, which switches between branches based on estimated returns to minimize search costs." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.439, + 0.884, + 0.562 + ], + "angle": 0, + "content": "Data-efficient training Another research direction focuses on reducing training costs by using a small set of high-quality samples (questions paired with trajectories or labels). Muennighoff et al. (2025) curate a dataset of 1,000 samples, emphasizing difficulty, diversity, and quality. Their work demonstrates that finetuning Qwen2.5-32B-Instruct on this dataset achieves performance surpassing o1-preview on competition math benchmarks. Ye et al. 
(2025) fine-tune Qwen2.5-32B-Instruct on 817 carefully curated training samples, achieving superior performance across a broader set of math reasoning benchmarks. Notably, Ye et al. (2025) highlight that these performance gains depend on using strong pre-trained models like Qwen2.5-32B-Instruct and do not occur with weaker models (e.g., Qwen1.5-32B-Instruct)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.578, + 0.357, + 0.594 + ], + "angle": 0, + "content": "5.4.2 Inference-aware Training" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.605, + 0.884, + 0.681 + ], + "angle": 0, + "content": "Existing work on inference scaling typically treats inference-time computation as a post-hoc design choice after conventional training. Inference-aware training approach challenges the assumption that decoupling training and inference-time computation is optimal. For instance, if an LLM is allowed multiple attempts to solve a math problem, fine-tuning it to explore diverse problem-solving strategies might yield better results than simply generating candidates representing its best single attempt." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.688, + 0.882, + 0.809 + ], + "angle": 0, + "content": "The core idea is that explicitly considering the inference procedure during training can significantly enhance the effectiveness of inference-time computation. For example, Best-of-N (BoN) is a basic inference-time strategy that selects the highest-reward response from \\( N \\) candidates. However, this approach is misaligned with fine-tuning objectives. To address this, Sessa et al. (2024) propose an RL objective that distills the Best-of-N distribution into the policy model using Jeffreys divergence (Jeffreys, 1946). Similarly, Balashankar et al. (2024) develop a calibrated reward that incorporates the inference procedure (Best-of-N) during alignment. In a related effort, Chow et al. 
(2024) aim to optimize BoN directly, overcoming the non-differentiable argmax operator by employing a reinforcement learning framework." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.83, + 0.517, + 0.848 + ], + "angle": 0, + "content": "6 Discussion: Trends and Open Challenges" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.865, + 0.884, + 0.927 + ], + "angle": 0, + "content": "The field of reasoning LLMs has seen rapid advancements, with notable trends emerging in training-vs-inference regimes and architectural dimensions as we discuss in Section 6.1. Despite this progress, several challenges remain, hindering their generalizability and practical applicability. This section outlines these observed trends and highlights open challenges, along with potential directions to address them (Section 6.2)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.103, + 0.287, + 0.119 + ], + "angle": 0, + "content": "6.1 Observed Trends" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.132, + 0.885, + 0.253 + ], + "angle": 0, + "content": "Following the two dimensions outlined in Figure 2, we identify two key trends in LLM reasoning: one progresses from inference scaling to learning to reason (Section 6.1.1), while the other shifts from standalone LLMs to agentic systems (Section 6.1.2). Additionally, reasoning is ubiquitous yet challenging when developing a general-purpose reasoner. Notably, many state-of-the-art reasoning language models are predominantly focused on a few domains, particularly mathematics and coding (OpenAI et al., 2024; DeepSeek-AI et al., 2025). Whether it is possible to build a truly generalizable reasoning system remains an open question (Kang et al., 2024; Qi et al., 2024; Huang et al., 2024c; Sun et al., 2024c). However, we observe a growing trend toward developing domain-specific reasoning models (Section 6.1.3)." 
+ }, + { + "type": "title", + "bbox": [ + 0.117, + 0.268, + 0.519, + 0.285 + ], + "angle": 0, + "content": "6.1.1 From Inference Scaling to Learning to Reason" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.295, + 0.885, + 0.402 + ], + "angle": 0, + "content": "Since the introduction of CoT and self-consistency (Wang et al., 2023f), inference scaling techniques have emerged as a key paradigm for enhancing reasoning performance without incurring the costs associated with reasoning-specific training. Inference scaling complements learning-to-reason approaches, with recent studies demonstrating that combining self-consistency with reasoning-specific training yields further improvements (DeepSeek-AI et al., 2025; Muennighoff et al., 2025). Additionally, since the release of OpenAI's o1 (Huang et al., 2024d), some methods have sought to activate human-like reasoning patterns by introducing self-correction (Kumar et al., 2024), self-critique (Xi et al., 2024), or even MCTS (Qin et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.408, + 0.885, + 0.544 + ], + "angle": 0, + "content": "Researchers initially found that data-driven approaches, such as supervised fine-tuning (SFT) and knowledge distillation, were highly effective in enhancing LLMs' reasoning capabilities. However, these methods rely on the availability of a strong teacher model for distillation. An alternative approach uses outcome labels for iterative rejection sampling (Yuan et al., 2023), which converges quickly after a few iterations (Dong et al., 2023). These limitations have spurred the development of more data-efficient methods, such as automatic process supervision (Jiao et al., 2024a; Wang et al., 2024g;k; Luo et al., 2024b) and iterative refinement (Guan et al., 2025), which optimize training trajectories using fixed outcome labels. 
The release of Deepseek-R1 (DeepSeek-AI et al., 2025) further advanced the field, demonstrating the ability to generate human-like, long reasoning chains through pure reinforcement learning under outcome supervision alone." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.56, + 0.502, + 0.577 + ], + "angle": 0, + "content": "6.1.2 From Standalone LLMs to Agentic Systems" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.587, + 0.885, + 0.648 + ], + "angle": 0, + "content": "In Sections 2.3 and 5, we discussed how the rise of agentic systems has significantly influenced reasoning research. A clear trend has emerged, shifting from standalone LLM reasoning to agentic reasoning. This shift aligns with our expectations: reasoning is no longer confined to a single LLM but is expected to interact with the external world and other agents, as well as exhibit autonomy, such as planning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.655, + 0.885, + 0.791 + ], + "angle": 0, + "content": "On one hand, there is ongoing debate about whether agentic reasoning is always beneficial, especially for straightforward and simple tasks (Sprague et al., 2024b; Liu et al., 2024c). On the other hand, current systems' autonomy is largely limited to planning, whereas it could encompass much more. For instance, system-level or meta-level planning is essential in agentic systems, requiring the design of effective ways to connect different agents (Zhou et al., 2025a; Zhuge et al., 2024; Zhang et al., 2024c; Hu et al., 2025). A notable recent study (Ke et al., 2025c) demonstrates that such design can be achieved with zero supervision and through self-improvement alone. Another critical aspect of autonomous agents is proactivity, yet current reasoning agents still lack the ability to proactively seek clarification or request additional information from users or the environment." 
+ }, + { + "type": "title", + "bbox": [ + 0.117, + 0.807, + 0.379, + 0.823 + ], + "angle": 0, + "content": "6.1.3 Domain-Specific Reasoners" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.833, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Mathematical reasoning Mathematics serves as an ideal testbed for studying LLM reasoning capabilities due to its structured nature and clear evaluation criteria. Mathematical reasoning has evolved along two complementary paths. The first, often referred to as the \"informal approach\" (Yang et al., 2024d), treats mathematical problems as natural language tasks and fine-tunes LLMs on carefully curated or filtered problem-solving datasets. Systems like NuminaMath (Fleureau et al., 2024), DeepSeekMath (Shao et al., 2024), Llemma (Azerbayev et al., 2024), and MetaMath (Yu et al., 2024b) have demonstrated remarkable" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.961 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.226 + ], + "angle": 0, + "content": "capabilities by combining mathematical text training (pre-training, supervised fine-tuning, and reinforcement learning), tree-based search, tool-integrated reasoning, and various inference scaling techniques discussed in earlier sections. This approach has achieved significant success across benchmarks ranging from GSM8K (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021b) to competition-level problems such as AIMO (Markets, 2024) and AIME-level problems (aim, 2025). However, challenges persist in tackling college-level and advanced mathematics, where high-quality training data is scarce, and verifying complex multi-step reasoning becomes increasingly difficult. Spatial reasoning (e.g., counting, navigation, and inferring spatial relationships) presents another challenge for LLMs and multi-modal LLMs (Wang et al., 2024b)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.232, + 0.885, + 0.415 + ], + "angle": 0, + "content": "Complementing the informal approach, formal mathematical reasoning grounds systems in precise symbolic frameworks, such as proof assistants like Isabelle (Nipkow et al., 2002), Lean (De Moura et al., 2015), and Coq (Barras et al., 1997; The Coq Development Team, 2024). Recent advances in this direction include neural theorem-proving systems that combine tactic generation with proof search (Yang et al., 2023b; Thakur et al., 2024), as well as autoformalization techniques that translate between natural and formal mathematics (Wu et al., 2022; Jiang et al., 2024a). The formal approach offers several advantages: automatic verification of reasoning steps, generation of training signals from the verification environment, and the potential to bootstrap capabilities through learned abstractions. For example, AlphaProof (AlphaProof & teams, 2024) and AlphaGeometry (Trinh et al., 2024) demonstrate the power of integrating neural networks with symbolic verification, achieving groundbreaking performance on Olympic-level mathematics problems. A recent position paper by Yang et al. (2024d) argues that formal mathematical reasoning represents a critical frontier for advancing AI's ability to tackle increasingly abstract and complex mathematical problems." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.432, + 0.884, + 0.613 + ], + "angle": 0, + "content": "Code generation Code serves as a more formal language for reasoning. Given the complexity of generating entire programs, earlier studies primarily focused on function-level code completion, as demonstrated by benchmarks such as HumanEval (Chen et al., 2021) and MBPP (Austin et al., 2021). With stronger foundation models trained on extensive code corpora (Zhu et al., 2024a; Hui et al., 2024), the focus of evaluation has shifted toward general competition programming (Hendrycks et al., 2021a; Jain et al., 2024). 
The earliest significant attempt to solve competition-level coding problems through large-scale training was AlphaCode (Li et al., 2022). Similar to the general domain, the training paradigm has evolved from instruction tuning (Wei et al., 2024) to RL and preference learning based on test cases and compiler feedback (Dou et al., 2024a; Weyssow et al., 2024; Jiao et al., 2025; Huang et al., 2024b). The recent releases of DeepSeek-R1 (DeepSeek-AI et al., 2025) and OpenAI's o3 (OpenAI et al., 2025) have further advanced the field by enabling end-to-end RL through outcome supervision. OpenAI et al. (2025) also highlight that purely data-driven approaches can outperform models incorporating human-experience-based competition strategies." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.62, + 0.884, + 0.742 + ], + "angle": 0, + "content": "Another important application of code generation is in software engineering, where advancements in LLMs are making fully automated pipelines increasingly feasible. SWE-Bench (Jimenez et al., 2024), a benchmark based on GitHub issues, challenges LLMs with real-world software engineering problems. These tasks require coupled abilities, such as long-context modeling to process repository-level inputs, logical reasoning to locate bugs and design unit tests, and programming to implement solutions. Wei et al. (2025) pioneer the use of end-to-end RL for optimizing automatic debugging. Specifically, they select pull requests (PRs) from GitHub linked to issues and use the consistency between the predicted code snippet and the repository's code after the PR is merged as the reward signal." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.759, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Tabular reasoning Reasoning over tabular (or structured) data, which involves generating responses based on user queries and provided tables, plays a vital role in improving data analysis efficiency (Lu et al., 2025). 
A critical aspect of tabular reasoning with LLMs involves transforming structured data into a format that these models can process effectively. Techniques such as serialization (Chen, 2023; Cheng et al., 2023; Chen et al., 2023e), prompt engineering (Ye et al., 2023b; Lin et al., 2023b; Wang et al., 2024n; Zhang et al., 2024j), and embedding methods (Herzig et al., 2020) have been widely studied to facilitate this adaptation, converting tabular data into human-readable text or leveraging specialized table representations. Additionally, specialized prompting of LLMs with transformed tabular data is crucial. For instance, Pourreza & Rafiei (2023); Ye et al. (2023c) find that LLMs perform better on decomposed sub-tasks than on the entire table reasoning task. However, LLMs may still struggle with certain sub-tasks. To address this, (Cao et al., 2023) employ diverse tools for specific sub-tasks, while (Lin et al., 2023b;a) focus on retrieving relevant" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.15 + ], + "angle": 0, + "content": "tables. Notably, (Jiang et al., 2023) propose a unified approach to enhance LLM reasoning over structured data by designing specialized interfaces. These interfaces extract relevant evidence from structured data, enabling LLMs to focus on reasoning based on the gathered information." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.157, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Despite the promising results of various adaptation methods, significant challenges remain. First, tabular data often comprises diverse feature types—categorical, numerical, and textual—adding complexity to modeling (Borisov et al., 2023; Gruver et al., 2023). 
Second, the effectiveness (Sui et al., 2024) and robustness (Liu et al., 2024d) of LLMs in tabular tasks heavily depend on proper prompt design and data preprocessing. Poor or out-of-distribution preprocessing can lead to information loss, misinterpretation, multicollinearity, and interpretability issues, significantly degrading performance (Sui et al., 2024). Finally, LLMs are prone to hallucinations (Ye et al., 2023d) and fairness concerns (Liu et al., 2023), limiting their reliability. For a comprehensive overview, see recent surveys on LLMs for table reasoning (Fang et al., 2024b; Dong & Wang, 2024; Zhang et al., 2025a; Lu et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.315, + 0.884, + 0.39 + ], + "angle": 0, + "content": "Reasoning in multi-agent games In game-theoretic scenarios involving both collaboration and competition, strategic social reasoning skills are essential (Lee et al., 2024). Strategic reasoning refers to the cognitive process of making decisions in complex social situations. As highlighted by Feng et al. (2024b), the complexity and challenges of this reasoning stem from the involvement of multiple parties and the dynamic nature of the environment." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.397, + 0.884, + 0.61 + ], + "angle": 0, + "content": "To capture the cognitive states of multiple parties, the concept of Theory-of-Mind (ToM) (Zhang et al., 2012) has been integrated into modeling processes. ToM attributes mental states—such as beliefs, intentions, desires, emotions, and knowledge—to oneself and others. Recent studies (Kosinski, 2024) have shown that LLMs exhibit ToM capabilities, and researchers have leveraged these capabilities to enhance strategic reasoning in social scenarios. For instance, Guo et al. (2023) computationally model the beliefs, intents, and potential behaviors of teammates and opponents to improve understanding and reasoning in games. 
Similarly, TOMABD (Montes et al., 2023) incorporates ToM into agents to enhance their reasoning and decision-making abilities. To address the complexity of dynamic social interactions (Li et al., 2024d), prior research employs RL methods to explore potential behaviors and evaluate different states (Seo & Lee, 2017; Wen et al., 2019). Additionally, some studies introduce modular frameworks to improve strategic reasoning in complex scenarios. For example, ReTA (Duan et al., 2024) uses LLM-based modules as the main actor, reward actor, and anticipation actor, inspired by minimax game theory. Recent work (Trecsenyi et al., 2025) has also begun exploring role-based multi-agent interactions to enable more sophisticated strategic reasoning. These approaches collectively enhance LLMs' strategic reasoning capabilities in dynamic environments." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.631, + 0.884, + 0.782 + ], + "angle": 0, + "content": "Reward modeling and evaluation as a reasoning task Evaluation, whether as an end goal or a component of a larger reasoning system, remains a significant challenge. While using PRMs to enhance reasoning abilities is popular during both inference and training, training these models requires extensive step-by-step annotations (Lightman et al., 2024). To address this, recent approaches have introduced automated feedback mechanisms, such as tree search (Wang et al., 2024g; Chen et al., 2024a; Setlur et al., 2024a; Luo et al., 2024c; Wang et al., 2024l) or, less frequently, LLM-as-judge (Zhang et al., 2025b). Although these methods avoid human preference annotations, they often rely on trajectories sampled from a fixed policy model, which may not align well with the problem distribution. This misalignment leads to poor generalization, as highlighted by Zheng et al. (2024). 
Consequently, the next frontier in reward modeling will need to combine automated data collection with diverse data sources to achieve annotation-efficient generalization." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.789, + 0.885, + 0.926 + ], + "angle": 0, + "content": "While reasoning in LLM-as-judges is not explicitly addressed, recent training and inference techniques have drawn from established methods for improving reasoning. Judge-based assessment inherently involves a finite set of outcomes (e.g., A or B for pairwise judgments or 1-5 for single ratings), making it suitable for self-consistency decoding (Kim et al., 2024b). More advanced inference-time approaches, such as multi-judge or multi-round discussions (Li et al., 2023d; Chan et al., 2023; Verga et al., 2024; Yu et al., 2024d), self-rationalization (Trivedi et al., 2024), or sequential escalation (Jung et al., 2024), have been proposed. Concurrently, training-time solutions for LLM-as-judges focus on distilling chain-of-thought judgments from larger teacher models and fine-tuning smaller judges via supervised fine-tuning (Wang et al., 2023g; Li et al., 2023b; Kim et al., 2023; 2024c; Vu et al., 2024) or preference optimization (Hu et al., 2024; Wang et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.181 + ], + "angle": 0, + "content": "2024f; Ye et al., 2024; Saad-Falcon et al., 2024; Deshpande et al., 2024; Wang et al., 2024j). Despite these advancements, such models still struggle in reasoning-intensive domains (Tan et al., 2024; Zhou et al., 2025b; Xu et al., 2025b), whereas stronger reasoning models have outperformed specialized judge models in more difficult evaluation settings (Xu et al., 2025a). In all, recent benchmarking results highlight that developing reasoning-specific judges remains an open and challenging research area." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.198, + 0.285, + 0.214 + ], + "angle": 0, + "content": "6.2 Open Challenges" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.226, + 0.885, + 0.348 + ], + "angle": 0, + "content": "Despite the trends observed in Section 6.1, several challenges remain. First, how can we effectively evaluate both the reasoning outcome and the reasoning chain? (Section 6.2.1). Second, do we truly understand reasoning? Does the reasoning chain generated by next-token sampling faithfully reflect the internal reasoning process of an LLM, or is it merely imitating its training data? (Section 6.2.2). Third, training of LLM reasoning system is still largely hindered by substantial data requirements, which include both more challenging questions and the corresponding outcome labels. This not only affects the end-to-end reasoner training, but also limits our exploration in building stronger reward models to facilitate inference time scaling (Section 6.2.3)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.364, + 0.334, + 0.379 + ], + "angle": 0, + "content": "6.2.1 Evaluating Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.39, + 0.884, + 0.481 + ], + "angle": 0, + "content": "As language models and agentic systems tackle increasingly complex tasks, evaluating their performance becomes equally challenging. Currently, progress in LLM reasoning is measured by outcome performance on fixed benchmarks (e.g., MATH (Hendrycks et al., 2021b)). However, relying solely on outcomes to verify reasoning correctness may be insufficient, as a correct final answer does not guarantee a logically sound reasoning chain (Hao et al., 2024a). Prior work has shown that LLMs often produce unfaithful reasoning chains, even when the final answers are correct (Wiegreffe et al., 2022; Lyu et al., 2023; Wang et al., 2023b)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.488, + 0.884, + 0.699 + ], + "angle": 0, + "content": "Evaluating reasoning beyond outcomes remains an open and challenging problem. Early approaches relied on human annotators to assess the quality of generated explanations (Camburu et al., 2018; Rajani et al., 2019), focusing on whether the reasoning could lead to the same predictions. To scale this idea, follow-up works (Wiegreffe et al., 2020; Hase et al., 2020) used trained models as simulators to evaluate the alignment between generated reasoning and final predictions. When human-annotated reasoning chains are available, some studies leverage traditional NLG metrics to measure overlap between human- and model-generated explanations (Clinciu et al., 2021). Others propose reasoning-specific metrics to assess aspects like coherency, redundancy, factuality (Golovneva et al., 2022), informativeness (Chen et al., 2022), robustness (Wang & Zhao, 2024), and contextual faithfulness (Ming et al., 2025). Under the LLM-as-Judge paradigm, recent works prompt powerful LLMs like GPT-4 to directly evaluate reasoning chains generated by other models (Hao et al., 2024a; Sun et al., 2024b). However, as reasoning tasks grow in complexity, evaluation becomes increasingly difficult, even for frontier models—if a model cannot perform a task, how can it judge if the task is done correctly? Thus, developing robust and accurate methods to evaluate reasoning beyond outcomes remains a significant and unresolved challenge." 
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.716, + 0.363, + 0.731 + ], + "angle": 0, + "content": "6.2.2 Understanding Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.742, + 0.884, + 0.802 + ], + "angle": 0, + "content": "Recent research on understanding LLM reasoning has advanced along two complementary paths: empirical studies that evaluate and analyze performance through carefully designed and controlled experiments, and formal analyses that introduce new frameworks to systematically explore the underlying mechanisms of how LLMs reason." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.819, + 0.882, + 0.926 + ], + "angle": 0, + "content": "Empirical analysis of reasoning Recent LLMs exhibit strong performance across diverse tasks, suggesting some level of reasoning capability. However, whether these skills are general and transferable or merely specialized for tasks encountered during pretraining remains an open and debated question. To address this, several empirical studies have sought to understand and enhance LLM capabilities across various reasoning forms: abstractive reasoning (Wu et al., 2024a; He & Lu, 2024), compositional reasoning (Bhargava & Ng, 2022; Li et al., 2024g), inductive reasoning (Yang et al., 2024f; Han et al., 2024b), abductive reasoning (Jung et al., 2022; Pareschi, 2023), deductive reasoning (Poesia et al., 2024; Seals & Shalin, 2024; Feng et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.226 + ], + "angle": 0, + "content": "2024a), logical reasoning (Wan et al., 2024b; Han et al., 2024a; Xu et al., 2025c), commonsense reasoning (Lin et al., 2021; Liang et al., 2023a; Sun et al., 2024a), math reasoning (Ahn et al., 2024; Mirzadeh et al., 2025), and social reasoning (Gandhi et al., 2023). 
Notably, Arkoudas (2023) qualitatively evaluate GPT-4 on 21 diverse reasoning problems, concluding that despite occasional analytical success, GPT-4 remains incapable of true reasoning. Similarly, Wu et al. (2024a) empirically investigate abstractive reasoning and find that while LLMs achieve nontrivial performance on counterfactual tasks, their performance consistently degrades compared to default conditions, indicating reliance on narrow, non-transferable procedures. Mondorf & Plank (2024) provide a comprehensive survey on recent evaluations of LLM reasoning abilities." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.232, + 0.885, + 0.491 + ], + "angle": 0, + "content": "Beyond assessing LLM reasoning capabilities, there is growing interest in evaluating how test-time scaling methods enhance reasoning. The empirical success of CoT prompting has spurred extensive research into its mechanisms. Wang et al. (2023a) and Madaan et al. (2023a) investigate the role of demonstrations, finding that LLMs prioritize pattern consistency over accuracy and exhibit robustness to invalid demonstrations—particularly in mathematical reasoning, where incorrect equations often do not hinder performance. They also emphasize the importance of relevant rationales and logical progression in CoT prompts. Additionally, Madaan et al. (2023a) conclude that CoT aids models by supplementing missing information, such as commonsense knowledge, and reinforcing task understanding. From a modeling perspective, Dutta et al. (2024) analyze CoT through neural mechanisms, revealing that LLMs process input context and generated CoT via parallel pathways. They find that early layers (e.g., layers 1-16 in Llama-2 7B (Touvron et al., 2023)) rely on pretraining knowledge, while later layers specialize in in-context learning, with answer-writing heads emerging in the final layers. From a task perspective, Sprague et al. 
(2024a) conduct a meta-analysis of 100 CoT papers, showing that CoT significantly improves performance on mathematical, logical, and algorithmic reasoning tasks but offers minimal gains for non-symbolic tasks. Their analysis suggests that CoT excels in computational steps but struggles with tool-augmented reasoning. On the training front, Gao et al. (2024a); Zhang et al. (2025b); Yeo et al. (2025) explore key supervised fine-tuning (SFT) and reinforcement learning (RL) factors that optimize LLM training strategies for enhancing CoT reasoning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.511, + 0.884, + 0.738 + ], + "angle": 0, + "content": "Formal analysis of reasoning There is increasing interest in formal analyses, which use structured and logical proofs to systematically evaluate and improve the reasoning capabilities of LLMs. Han et al. (2022) introduce FOLIO, a dataset designed to assess models' ability to derive correct conclusions from premises using first-order logic reasoning. Similarly, Saparov & He (2023) develop a benchmark evaluating LLMs on symbolic ontologies, revealing that models often struggle with proof planning and rely on knowledge retrieval rather than genuine reasoning. These findings highlight the potential of neurosymbolic methods to better understand LLM reasoning. Recent work also explores formal analysis techniques to enhance LLM reasoning. For instance, Pan et al. (2023) use LLMs to translate natural language problems into symbolic formulations, which are then processed by deterministic symbolic solvers for inference. (Li et al., 2025b) demonstrate the promise of leveraging LLMs' symbolic reasoning for mathematical problem-solving. Other studies focus on domain-specific reasoning: Fang et al. (2024a) propose an LLM-based agent for text-based games, designed to tackle symbolic challenges and achieve in-game objectives, while Nahid & Rafiei (2024) introduce a framework to enhance LLMs' symbolic reasoning by normalizing web tables. 
These studies reveal LLMs' limitations in structured reasoning while emphasizing the value of integrating formal analysis to strengthen their capabilities." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.758, + 0.884, + 0.925 + ], + "angle": 0, + "content": "Theoretical analysis of ICL and CoT reasoning The success of in-context learning (ICL) and CoT prompting in enhancing LLM reasoning has sparked significant interest in understanding their underlying mechanisms from theoretical perspectives. Extensive prior studies on ICL suggest that transformer-based in-context learners effectively implement various learning algorithms, encoding implicit, context-dependent models for generation within their hidden activations—models that can be trained through demonstrations as these activations are computed. For instance, Akyurek et al. (2022) investigate this hypothesis in the context of linear regression models, while Von Oswald et al. (2023) and Dai et al. (2023) explore how transformer-based in-context learners function as meta-optimizers, effectively learning models via gradient descent during their forward pass. From a Bayesian inference perspective, Xie et al. (2022); Zhang et al. (2023) and Wang et al. (2023e) demonstrate that transformer-based in-context learners can achieve the Bayes-optimal predictor when demonstrations are selected based on a shared latent concept variable, such" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.104, + 0.884, + 0.165 + ], + "angle": 0, + "content": "as format or task information, even in the presence of distribution mismatches between demonstrations and training data. Additionally, Elhage et al. (2021); Olsson et al. 
(2022) examine ICL through the concept of \"induction heads\" - attention heads that implement a simple algorithm to complete tasks, providing evidence that induction heads may underlie much of the in-context learning observed in transformer-based models." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.172, + 0.885, + 0.385 + ], + "angle": 0, + "content": "The body of work exploring the theoretical insights into CoT mechanisms remains relatively limited, with most studies focusing on the expressiveness of LLMs when using CoT. A pioneering study by Feng et al. (2023a) investigates LLMs with CoT for solving mathematical and decision-making problems. Using circuit complexity theory (Arora & Barak, 2009), they demonstrate that bounded-depth transformers cannot solve basic arithmetic or equation tasks unless the model size grows super-polynomially. In contrast, they prove that constant-size models can solve these tasks, along with a wide range of decision-making problems such as Dynamic Programming, by generating CoT derivations in a common mathematical language. Li et al. (2024h) extend these findings, providing a tighter upper bound on the expressiveness of constant-depth transformers with CoT. However, these studies do not explore how the length of a CoT affects model reasoning power. To address this gap, Merrill & Sabharwal (2024) find that a logarithmic number of intermediate steps (relative to input length) offers only marginal gains over standard transformers, while a linear number of steps under the assumption of projected pre-norm (a slight generalization of standard pre-norm) enables the recognition of all regular languages. Furthermore, polynomially many steps, combined with generalized pre-norm, allow transformers to recognize exactly the class of polynomial-time solvable problems." 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.405, + 0.571, + 0.422 + ], + "angle": 0, + "content": "6.2.3 Data Challenges in Advancing Reasoning Capabilities" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.433, + 0.884, + 0.556 + ], + "angle": 0, + "content": "Challenges in scaling question and outcome supervision for RL As discussed earlier, development trends in both general and task-specific domains are converging, with a focus on employing end-to-end RL to minimize inductive bias and push the boundaries of intelligence. Frontier models now incorporate competition-level problems annually for training, as these represent the most challenging tasks and are annotated with high-quality answers by human experts. However, we are nearing the limits of available human-annotated data, raising the question of whether methods beyond human labeling can enable the continuous scaling of RL. This challenge is particularly relevant in domains where prompts are not easily verifiable, such as open-ended generation, software engineering, and most agentic tasks." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.576, + 0.884, + 0.713 + ], + "angle": 0, + "content": "Challenges in reward modeling Early studies have investigated the feasibility of process supervision (Lightman et al., 2024) and its effectiveness in inference-time scaling (Snell et al., 2025). However, its high annotation costs and ambiguous definition—particularly in long CoT scenarios where self-reflection is encouraged—have limited its adoption in large-scale reinforcement learning. Despite these challenges, the key advantage of accurate process supervision is its ability to reduce hallucinations, making it essential for automated reasoning and knowledge discovery. Additionally, as discussed in Section 4.2, the training paradigm for reward models is closely tied to that of reasoning models. 
This raises concerns about whether allocating the same annotation budget directly to reasoning models could lead to more stable and general improvements, potentially limiting the gains achievable through inference-time scaling." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.737, + 0.248, + 0.753 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.774, + 0.885, + 0.926 + ], + "angle": 0, + "content": "In this work, we provide a timely and comprehensive survey on LLM reasoning. We first formalize the goal of LLM reasoning and consolidate past research by categorizing reasoning techniques along two dimensions: regimes and architectures. Within each of these dimensions, we review both input and output perspectives in detail. Our review highlights emerging trends, including the shift from inference-time scaling to learning-to-reason regimes, and the transition from standalone models to agentic systems. We also review and compare a wide range of learning algorithms, including supervised fine-tuning and reinforcement learning, as well as the training of reasoners and training of verifiers. Despite these advancements, challenges remain in evaluating reasoning and understanding real reasoning mechanisms as well as addressing data challenges in advancing reasoning capabilities. We encourage future research to further explore these trends, such as inference-aware learning-to-reason and automated multi-agent design, to enhance LLM reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.104, + 0.254, + 0.12 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.129, + 0.687, + 0.145 + ], + "angle": 0, + "content": "We thank M Saiful Bari, Semih Yavuz and Yingbo Zhou for helpful discussions." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.163, + 0.216, + 0.179 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.187, + 0.882, + 0.218 + ], + "angle": 0, + "content": "American invitational mathematics examination. Mathematical Association of America, 2025. https://maa.org/maa-invitational-competitions/." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.226, + 0.884, + 0.272 + ], + "angle": 0, + "content": "Rishabh Agarwal, Avi Singh, Lei Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, et al. Many-shot in-context learning. Advances in Neural Information Processing Systems, 37:76930-76966, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.28, + 0.884, + 0.356 + ], + "angle": 0, + "content": "Sweta Agrawal, Chunting Zhou, Mike Lewis, Luke Zettlemoyer, and Marjan Ghazvininejad. In-context examples selection for machine translation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 8857-8873, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.564. URL https://aclanthology.org/2023-findings-acl.564/." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.364, + 0.882, + 0.41 + ], + "angle": 0, + "content": "Arash Ahmadian, Chris Cremer, Matthias Gallé, Marzieh Fadaee, Julia Kreutzer, Olivier Pietquin, Ahmet Üstün, and Sara Hooker. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. arXiv preprint arXiv:2402.14740, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.418, + 0.884, + 0.495 + ], + "angle": 0, + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. 
In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.502, + 0.882, + 0.548 + ], + "angle": 0, + "content": "Afra Feyza Akyürek, Ekin Akyürek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. Rl4f: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv preprint arXiv:2305.08844, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.556, + 0.884, + 0.602 + ], + "angle": 0, + "content": "Ekin Akyürek, Dale Schuurmans, Jacob Andreas, Tengyu Ma, and Denny Zhou. What learning algorithm is in-context learning? investigations with linear models. In The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.61, + 0.884, + 0.655 + ], + "angle": 0, + "content": "AlphaProof and AlphaGeometry teams. AI achieves silver-medal standard solving international mathematical olympiad problems. https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.664, + 0.712, + 0.68 + ], + "angle": 0, + "content": "Konstantine Arkoudas. Gpt-4 can't reason. arXiv preprint arXiv:2308.03762, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.687, + 0.884, + 0.717 + ], + "angle": 0, + "content": "Sanjeev Arora and Boaz Barak. Computational complexity: a modern approach. Cambridge University Press, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.726, + 0.884, + 0.818 + ], + "angle": 0, + "content": "Krishna Aswani, Huilin Lu, Pranav Patankar, Priya Dhalwani, Xue Tan, Jayant Ganeshmohan, and Simon Lacasse. 
Auto-evolve: Enhancing large language model's performance via self-reasoning framework. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 13243-13257, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.774. URL https://aclanthology.org/2024-findings-emnlp.774/." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.825, + 0.884, + 0.872 + ], + "angle": 0, + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.879, + 0.884, + 0.925 + ], + "angle": 0, + "content": "Mohammad Gheshlaghi Azar, Zhaohan Daniel Guo, Bilal Piot, Remi Munos, Mark Rowland, Michal Valko, and Daniele Calandriello. A general theoretical paradigm to understand learning from human preferences. In International Conference on Artificial Intelligence and Statistics, pp. 4447-4455. PMLR, 2024." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.187, + 0.884, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "40" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.152 + ], + "angle": 0, + "content": "Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q Jiang, Jia Deng, Stella Biderman, and Sean Welleck. LLemma: An open language model for mathematics. In International Conference on Learning Representations (ICLR), 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.161, + 0.885, + 0.21 + ], + "angle": 0, + "content": "Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.22, + 0.885, + 0.281 + ], + "angle": 0, + "content": "Ananth Balashankar, Ziteng Sun, Jonathan Berant, Jacob Eisenstein, Michael Collins, Adrian Hutter, Jong Lee, Chirag Nagpal, Flavien Prost, Aradhana Sinha, Ananda Theertha Suresh, and Ahmad Beirami. Infalign: Inference-aware language model alignment. CoRR, abs/2412.19792, 2024. doi: 10.48550/ARXIV.2412.19792. URL https://doi.org/10.48550/arXiv.2412.19792." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.293, + 0.885, + 0.341 + ], + "angle": 0, + "content": "Bruno Barras, Samuel Boutin, Cristina Cornes, Judicael Courant, Jean-Christophe Filliatre, Eduardo Gimenez, Hugo Herbelin, Gerard Huet, Cesar Munoz, Chetan Murthy, et al. The Coq proof assistant reference manual: Version 6.1. PhD thesis, Inria, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.352, + 0.885, + 0.399 + ], + "angle": 0, + "content": "Richard Bellman. Dynamic programming and stochastic control processes. Information and Control, 1 (3):228-239, 1958. ISSN 0019-9958. doi: https://doi.org/10.1016/S0019-9958(58)80003-0. URL https://www.sciencedirect.com/science/article/pii/S0019995858800030." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.41, + 0.885, + 0.456 + ], + "angle": 0, + "content": "Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.468, + 0.885, + 0.53 + ], + "angle": 0, + "content": "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.541, + 0.885, + 0.589 + ], + "angle": 0, + "content": "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.6, + 0.885, + 0.647 + ], + "angle": 0, + "content": "Prajjwal Bhargava and Vincent Ng. Commonsense knowledge reasoning and generation with pre-trained language models: A survey. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 12317-12325, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.658, + 0.885, + 0.706 + ], + "angle": 0, + "content": "Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024. URL https://arxiv.org/pdf/2412.09078." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.716, + 0.885, + 0.765 + ], + "angle": 0, + "content": "Vadim Borisov, Kathrin Sessler, Tobias Leemann, Martin Pawelczyk, and Gjergji Kasneci. Language models are realistic tabular data generators. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=cEygmmQNOeI." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.774, + 0.885, + 0.822 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.833, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners, 2020. URL https://arxiv.org/abs/2005.14165." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "41" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-snli: Natural language inference with natural language explanations. Advances in Neural Information Processing Systems, 31, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.16, + 0.885, + 0.209 + ], + "angle": 0, + "content": "Lorenzo Canese, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, Marco Re, and Sergio Spanò. Multi-agent reinforcement learning: A review of challenges and applications. Applied Sciences, 11(11):4948, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.218, + 0.885, + 0.297 + ], + "angle": 0, + "content": "Yihan Cao, Shuyi Chen, Ryan Liu, Zhiruo Wang, and Daniel Fried. API-assisted code generation for question answering on varied table structures. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 14536-14548, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.897. URL https://aclanthology.org/2023.emnlp-main.897/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.305, + 0.885, + 0.355 + ], + "angle": 0, + "content": "Stephen Casper, Xander Davies, Claudia Shi, Thomas Krendl Gilbert, Jérémy Scheurer, Javier Rando, Rachel Freedman, Tomasz Korbak, David Lindner, Pedro Freire, et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv preprint arXiv:2307.15217, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.362, + 0.885, + 0.41 + ], + "angle": 0, + "content": "Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. Chateval: Towards better llm-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.419, + 0.885, + 0.453 + ], + "angle": 0, + "content": "Edward Y Chang. Socrasynth: Multi-llm reasoning with conditional statistics. arXiv preprint arXiv:2402.06634, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.461, + 0.885, + 0.508 + ], + "angle": 0, + "content": "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. Fireact: Toward language agent fine-tuning. CoRR, abs/2310.05915, 2023a. doi: 10.48550/ARXIV.2310.05915. URL https://doi.org/10.48550/arXiv.2310.05915." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.518, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Bei Chen, Fengji Zhang, Anh Nguyen, Daoguang Zan, Zeqi Lin, Jian-Guang Lou, and Weizhu Chen. Codet: Code generation with generated tests. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=ktrw68Cmu9c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.575, + 0.885, + 0.609 + ], + "angle": 0, + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: process supervision without process. arXiv preprint arXiv:2405.03553, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.617, + 0.885, + 0.665 + ], + "angle": 0, + "content": "Hanjie Chen, Faeze Brahman, Xiang Ren, Yangfeng Ji, Yejin Choi, and Swabha Swayamdipta. Information-theoretic evaluation of free-text rationales with conditional \\(\\mathcal{V}\\)-information. In Workshop on Trustworthy and Socially Responsible Machine Learning, NeurIPS 2022, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.674, + 0.885, + 0.709 + ], + "angle": 0, + "content": "Justin Chih-Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse llms. arXiv preprint arXiv:2309.13007, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.716, + 0.885, + 0.763 + ], + "angle": 0, + "content": "Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning, 2024b. URL https://arxiv.org/abs/2409.12147." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.773, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "42" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Pei Chen, Boran Han, and Shuai Zhang. Comm: Collaborative multi-agent, multi-reasoning-path prompting for complex problem solving. arXiv preprint arXiv:2404.17729, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.146, + 0.885, + 0.224 + ], + "angle": 0, + "content": "Wei-Lin Chen, Cheng-Kuang Wu, Yun-Nung Chen, and Hsin-Hsi Chen. Self-ICL: Zero-shot in-context learning with self-generated demonstrations. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 
15651–15662, Singapore, December 2023d. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.968. URL https://aclanthology.org/2023.emnlp-main.968/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.235, + 0.885, + 0.298 + ], + "angle": 0, + "content": "Wenhu Chen. Large language models are few(1)-shot table reasoners. In Andreas Vlachos and Isabelle Augenstein (eds.), Findings of the Association for Computational Linguistics: EACL 2023, pp. 1120-1130, Dubrovnik, Croatia, May 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-eacl.83. URL https://aclanthology.org/2023-findings-eacl.83/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.308, + 0.885, + 0.356 + ], + "angle": 0, + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023e. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.367, + 0.885, + 0.414 + ], + "angle": 0, + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.425, + 0.885, + 0.516 + ], + "angle": 0, + "content": "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. Agent-flan: Designing data and methods of effective agent tuning for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics, ACL 2024, Bangkok, Thailand and virtual meeting, August 11-16, 2024, pp. 9354-9366. Association for Computational Linguistics, 2024e. 
URL https://doi.org/10.18653/v1/2024-findings-acl.557." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.528, + 0.885, + 0.561 + ], + "angle": 0, + "content": "Zihan Chen, Song Wang, Zhen Tan, Jundong Li, and Cong Shen. Maple: Many-shot adaptive pseudo-labeling for in-context learning, 2025. URL https://arxiv.org/abs/2505.16225." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.572, + 0.885, + 0.634 + ], + "angle": 0, + "content": "Zhoujun Cheng, Tianbao Xie, Peng Shi, Chengzu Li, Rahul Nadkarni, Yushi Hu, Caiming Xiong, Dragomir Radev, Mari Ostendorf, Luke Zettlemoyer, Noah A. Smith, and Tao Yu. Binding language models in symbolic languages. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1H1PV42cbF." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.645, + 0.885, + 0.707 + ], + "angle": 0, + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. CoRR, abs/2412.15287, 2024. doi: 10.48550/ARXIV.2412.15287. URL https://doi.org/10.48550/arXiv.2412.15287." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.718, + 0.885, + 0.751 + ], + "angle": 0, + "content": "Miruna Clinciu, Arash Eshghi, and Helen Hastie. A study of automatic metrics for the evaluation of natural language explanations. arXiv preprint arXiv:2103.08545, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.762, + 0.885, + 0.809 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.82, + 0.885, + 0.853 + ], + "angle": 0, + "content": "Jonathan Cook, Tim Rocktäschel, Jakob Foerster, Dennis Aumiller, and Alex Wang. Ticking all the boxes: Generated checklists improve llm evaluation and generation. arXiv preprint arXiv:2410.03608, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.864, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "43" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Chris Cummins, Volker Seeker, Dejan Grubisic, Mostafa Elhoushi, Youwei Liang, Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Kim Hazelwood, Gabriel Synnaeve, et al. Large language models for compiler optimization. arXiv preprint arXiv:2309.07062, 2023. URL https://arxiv.org/abs/2309.07062." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.157, + 0.885, + 0.236 + ], + "angle": 0, + "content": "Damai Dai, Yutao Sun, Li Dong, Yaru Hao, Shuming Ma, Zhifang Sui, and Furu Wei. Why can GPT learn in context? language models secretly perform gradient descent as meta-optimizers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 4005–4019, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.247. 
URL https://aclanthology.org/2023-findings-acl.247/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.242, + 0.885, + 0.29 + ], + "angle": 0, + "content": "Ning Dai, Zheng Wu, Renjie Zheng, Ziyun Wei, Wenlei Shi, Xing Jin, Guanlin Liu, Chen Dun, Liang Huang, and Lin Yan. Process supervision-guided policy optimization for code generation. arXiv preprint arXiv:2410.17621, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.297, + 0.885, + 0.346 + ], + "angle": 0, + "content": "Mehul Damani, Idan Shenfeld, Andi Peng, Andreea Bobu, and Jacob Andreas. Learning how hard to think: Input-adaptive allocation of LM computation. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=6qUUgw9bAZ." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.352, + 0.885, + 0.385 + ], + "angle": 0, + "content": "Debrup Das, Debopriyo Banerjee, Somak Aditya, and Ashish Kulkarni. Mathsensei: A tool-augmented large language model for mathematical reasoning. arXiv preprint arXiv:2402.17231, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.391, + 0.885, + 0.453 + ], + "angle": 0, + "content": "Leonardo De Moura, Soonho Kong, Jeremy Avigad, Floris Van Doorn, and Jakob von Raumer. The lean theorem prover (system description). In _Automated Deduction-CADE-25: 25th International Conference on Automated Deduction_, Berlin, Germany, August 1-7, 2015, Proceedings 25, pp. 378-388. Springer, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.461, + 0.885, + 0.871 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou Y. X. 
Zhu Yanhong Xu Yanping Huang Yaohui Li Yi ZhengYuchen ZhuYunxian Ma Ying Tang Yukun Zha Yuting YanZ.Z.Ren Zehui Ren,Zhangli ShaZhe FuZhean XuZhenda Xie Zhengyan Zhang,Zhenwen Hao,Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang.Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Shumin Deng, Ningyu Zhang, Nay Oo, and Bryan Hooi. Towards a unified view of answer calibration for multi-step reasoning. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao (eds.), Proceedings of the 2nd Workshop on Natural" + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "44" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Language Reasoning and Structured Explanations (@ACL 2024), pp. 25-38, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.3/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.144, + 0.887, + 0.18 + ], + "angle": 0, + "content": "Yihe Deng, Weitong Zhang, Zixiang Chen, and Quanquan Gu. Rephrase and respond: Let large language models ask better questions for themselves. arXiv preprint arXiv:2311.04205, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.187, + 0.885, + 0.222 + ], + "angle": 0, + "content": "Yuntian Deng, Kiran Prasad, Roland Fernandez, Paul Smolensky, Vishrav Chaudhary, and Stuart M. Shieber. Implicit chain of thought reasoning via knowledge distillation. 
CoRR, abs/2311.01460, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.228, + 0.885, + 0.264 + ], + "angle": 0, + "content": "Yuntian Deng, Yejin Choi, and Stuart M. Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. CoRR, abs/2405.14838, 2024b. URL https://doi.org/10.48550/arXiv.2405.14838." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.27, + 0.885, + 0.32 + ], + "angle": 0, + "content": "Darshan Deshpande, Selvan Sunitha Ravi, Sky CH-Wang, Bartosz Mielczarek, Anand Kannappan, and Rebecca Qian. Glider: Grading llm interactions and decisions using explainable ranking. arXiv preprint arXiv:2412.14140, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.328, + 0.885, + 0.378 + ], + "angle": 0, + "content": "Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, Kashun Shum, and Tong Zhang. RAFT: reward ranked finetuning for generative foundation model alignment. Trans. Mach. Learn. Res., 2023, 2023. URL https://openreview.net/forum?id=m7p507zb1Y." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.385, + 0.885, + 0.463 + ], + "angle": 0, + "content": "Haoyu Dong and Zhiruo Wang. Large language models for tabular data: Progresses and future directions. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, pp. 2997-3000, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 97898400704314. doi: 10.1145/3626772.3661384. URL https://doi.org/10.1145/3626772.3661384." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.472, + 0.885, + 0.521 + ], + "angle": 0, + "content": "Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Baobao Chang, et al. A survey on in-context learning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 1107-1128, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.529, + 0.885, + 0.593 + ], + "angle": 0, + "content": "Jiri Dostál. Theory of problem solving. Procedia - Social and Behavioral Sciences, 174:2798-2805, 2015. ISSN 1877-0428. doi: https://doi.org/10.1016/j.sbspro.2015.01.970. URL https://www.sciencedirect.com/science/article/pii/S1877042815010290. International Conference on New Horizons in Education, INTE 2014, 25-27 June 2014, Paris, France." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.602, + 0.885, + 0.68 + ], + "angle": 0, + "content": "Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, Zhiheng Xi, Yuhao Zhou, Tao Ji, Rui Zheng, Qi Zhang, Xuanjing Huang, and Tao Gui. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. CoRR, abs/2402.01391, 2024a. doi: 10.48550/ARXIV.2402.01391. URL https://doi.org/10.48550/arXiv.2402.01391." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.688, + 0.885, + 0.768 + ], + "angle": 0, + "content": "Zi-Yi Dou, Cheng-Fu Yang, Xueqing Wu, Kai-Wei Chang, and Nanyun Peng. Re-ReST: Reflection-reinforced self-training for language agents. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 15394-15411, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.861. URL https://aclanthology.org/2024.emnlp-main.861/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.776, + 0.885, + 0.855 + ], + "angle": 0, + "content": "Dheeru Dua, Shivanshu Gupta, Sameer Singh, and Matt Gardner. Successive prompting for decomposing complex questions. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1251-1265, Abu Dhabi, United Arab Emirates, December 2022. 
Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.81. URL https://aclanthology.org/2022.emnlp-main.81." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.863, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Jinhao Duan, Shiqi Wang, James Diffenderfer, Lichao Sun, Tianlong Chen, Bhavya Kailkhura, and Kaidi Xu. Reta: Recursively thinking ahead to improve the strategic reasoning of large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2232-2246, 2024." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.887, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "45" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Tom Duenas and Diana Ruiz. The path to superintelligence: A critical analysis of openai's five levels of ai progression. Research Gate, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.142, + 0.885, + 0.192 + ], + "angle": 0, + "content": "Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.197, + 0.885, + 0.245 + ], + "angle": 0, + "content": "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 1(1):12, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.251, + 0.885, + 0.286 + ], + "angle": 0, + "content": "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate. arXiv preprint arXiv:2411.00053, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.291, + 0.885, + 0.325 + ], + "angle": 0, + "content": "Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.33, + 0.885, + 0.393 + ], + "angle": 0, + "content": "Meng Fang, Shilong Deng, Yudi Zhang, Zijing Shi, Ling Chen, Mykola Pechenizkiy, and Jun Wang. Large language models are neurosymbolic reasoners. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17985-17993, Mar. 2024a. doi: 10.1609/aaai.v38i16.29754. URL https://ojs.aaai.org/index.php/AAAI/article/view/29754." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.4, + 0.885, + 0.463 + ], + "angle": 0, + "content": "Xi Fang, Weijie Xu, Fiona Anting Tan, Ziqing Hu, Jiani Zhang, Yanjun Qi, Srinivasan H. Sengamedu, and Christos Faloutsos. Large language models (LLMs) on tabular data: Prediction, generation, and understanding - a survey. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=IZnrCGF9WI." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.468, + 0.885, + 0.517 + ], + "angle": 0, + "content": "Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: a theoretical perspective. Advances in Neural Information Processing Systems, 36:70757-70798, 2023a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.523, + 0.885, + 0.601 + ], + "angle": 0, + "content": "Jiazhan Feng, Ruochen Xu, Junheng Hao, Hiteshi Sharma, Yelong Shen, Dongyan Zhao, and Weizhu Chen. Language models can be deductive solvers. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Findings of the Association for Computational Linguistics: NAACL 2024, pp. 4026-4042, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-nacl.254. URL https://aclanthology.org/2024 findings-nacl.254/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.608, + 0.885, + 0.657 + ], + "angle": 0, + "content": "Xiachong Feng, Longxu Dou, Ella Li, Qinghao Wang, Haochuan Wang, Yu Guo, Chang Ma, and Lingpeng Kong. A survey on large language model-based social agents in game-theoretic scenarios, 2024b. URL https://arxiv.org/abs/2412.03920." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.662, + 0.885, + 0.711 + ], + "angle": 0, + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.716, + 0.885, + 0.765 + ], + "angle": 0, + "content": "Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=9ZxnPZGmPU." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.77, + 0.885, + 0.819 + ], + "angle": 0, + "content": "Emily First, Markus N Rabe, Talia Ringer, and Yuriy Brun. Baldur: Whole-proof generation and repair with large language models. 
In Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1229-1241, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.824, + 0.885, + 0.874 + ], + "angle": 0, + "content": "Yann Fleureau, Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, and Kashif Rasul. How NuminaMath won the 1st AIMO Progress Prize. https://huggingface.co/blog/winning-aimo-progress-prize, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.928 + ], + "angle": 0, + "content": "Adam Fourney, Gagan Bansal, Hussein Mozannar, Cheng Tan, Eduardo Salinas, Friederike Niedtner, Grace Proebsting, Griffin Bassman, Jack Gerrits, Jacob Alber, et al. Magentic-one: A generalist multi-agent system for solving complex tasks. arXiv preprint arXiv:2411.04468, 2024." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.928 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.51, + 0.96 + ], + "angle": 0, + "content": "46" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Adrian Garret Gabriel, Alaa Alameer Ahmad, and Shankar Kumar Jeyakumar. Advancing agentic systems: Dynamic task decomposition, tool integration and evaluation using novel metrics and dataset, 2024. URL https://arxiv.org/abs/2410.22457." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.157, + 0.885, + 0.222 + ], + "angle": 0, + "content": "Kanishk Gandhi, Jan-Philipp Franken, Tobias Gerstenberg, and Noah Goodman. Understanding social reasoning in language models with language models. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=8bqjirgxQM." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.227, + 0.885, + 0.275 + ], + "angle": 0, + "content": "Deep Ganguli, Liane Lovitt, Jackson Kernion, Amanda Askell, Yuntao Bai, Saurav Kadavath, Ben Mann, Ethan Perez, Nicholas Schiefer, Kamal Ndousse, et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv preprint arXiv:2209.07858, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.281, + 0.885, + 0.316 + ], + "angle": 0, + "content": "Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.32, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.359, + 0.885, + 0.407 + ], + "angle": 0, + "content": "Olga Golovneva, Moya Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Roscoe: A suite of metrics for scoring step-by-step reasoning. arXiv preprint arXiv:2212.07919, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.414, + 0.885, + 0.476 + ], + "angle": 0, + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. URL https://proceedings.neurips.cc/paper_files/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.482, + 0.885, + 0.546 + ], + "angle": 0, + "content": "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 
Tora: A tool-integrated reasoning agent for mathematical problem solving. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Ep0TjVoap." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.552, + 0.885, + 0.601 + ], + "angle": 0, + "content": "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=ph04CRkPdC." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.607, + 0.885, + 0.655 + ], + "angle": 0, + "content": "Nate Gruver, Marc Anton Finzi, Shikai Qiu, and Andrew Gordon Wilson. Large language models are zero-shot time series forecasters. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=md68e8iZK1." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.661, + 0.885, + 0.695 + ], + "angle": 0, + "content": "Zhengyao Gu, Henry Peng Zou, Yankai Chen, Aiwei Liu, Weizhi Zhang, and Philip S Yu. Semi-supervised in-context learning: A baseline study. arXiv preprint arXiv:2503.03062, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.7, + 0.885, + 0.748 + ], + "angle": 0, + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv: 2501.04519, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.754, + 0.885, + 0.801 + ], + "angle": 0, + "content": "Jiaxian Guo, Bo Yang, Paul Yoo, Bill Yuchen Lin, Yusuke Iwasawa, and Yutaka Matsuo. Suspicion-agent: Playing imperfect information games with theory of mind aware gpt-4. arXiv preprint arXiv:2309.17277, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.808, + 0.885, + 0.872 + ], + "angle": 0, + "content": "Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=ZG3RaNIs08." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Large language model based multi-agents: A survey of progress and challenges. arXiv preprint arXiv:2402.01680, 2024b." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "47" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.152 + ], + "angle": 0, + "content": "Zakaria Hammane, Fatima-Ezzahraa Ben-Bouazza, and Abdelhadi Fennan. Selfrewarddrag: Enhancing medical reasoning with retrieval-augmented generation and self-evaluation in large language models. In 2024 International Conference on Intelligent Systems and Computer Vision (ISCV), pp. 1-8. IEEE, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.161, + 0.885, + 0.209 + ], + "angle": 0, + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, et al. Folio: Natural language reasoning with first-order logic. arXiv preprint arXiv:2209.00840, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.218, + 0.885, + 0.311 + ], + "angle": 0, + "content": "Simeng Han, Aaron Yu, Rui Shen, Zhenting Qi, Martin Riddell, Wenfei Zhou, Yujie Qiao, Yilun Zhao, Semih Yavuz, Ye Liu, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Dragomir Radev, Rex Ying, and Arman Cohen. P-FOLIO: Evaluating and improving logical reasoning with abundant human-written reasoning chains. In Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 16553-16565, Miami, Florida, USA, November 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.966. URL https://aclanthology.org/2024-findings-emnlp.966/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.321, + 0.885, + 0.353 + ], + "angle": 0, + "content": "Simon Jerome Han, Keith J Ransom, Andrew Perfors, and Charles Kemp. Inductive reasoning in humans and large language models. Cognitive Systems Research, 83:101155, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.363, + 0.885, + 0.441 + ], + "angle": 0, + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Jiahua Hong, Zhen Wang, Daisy Zhe Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 8154-8173. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.507. URL https://doi.org/10.18653/v1/2023.emnlp-main.507." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.45, + 0.885, + 0.498 + ], + "angle": 0, + "content": "Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyuan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, et al. Llm reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. arXiv preprint arXiv:2404.05221, 2024a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.508, + 0.885, + 0.554 + ], + "angle": 0, + "content": "Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. CoRR, abs/2412.06769, 2024b. URL https://doi.org/10.48550/arXiv.2412.06769." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.565, + 0.885, + 0.611 + ], + "angle": 0, + "content": "Peter Hase, Shiyue Zhang, Harry Xie, and Mohit Bansal. Leakage-adjusted simulatability: Can models generate non-trivial explanations of their behavior in natural language? arXiv preprint arXiv:2010.04119, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.622, + 0.885, + 0.655 + ], + "angle": 0, + "content": "Michael Hassid, Tal Remez, Jonas Gehring, Roy Schwartz, and Yossi Adi. The larger the better? improved llm code-generation via budget reallocation. arXiv preprint arXiv:2404.00725, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.665, + 0.885, + 0.712 + ], + "angle": 0, + "content": "Alex Havrilla, Yuqing Du, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, Sainbayar Sukhbaatar, and Roberta Raileanu. Teaching large language models to reason with reinforcement learning. arXiv preprint arXiv:2403.04642, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.722, + 0.885, + 0.769 + ], + "angle": 0, + "content": "Jiabang He, Lei Wang, Yi Hu, Ning Liu, Hui Liu, Xing Xu, and Heng Tao Shen. Icl-d3ie: In-context learning with diverse demonstrations updating for document information extraction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19485-19494, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.779, + 0.885, + 0.813 + ], + "angle": 0, + "content": "Jinwei He and Feng Lu. Causejudger: Identifying the cause with llms for abductive logical reasoning. 
arXiv preprint arXiv:2409.05559, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.822, + 0.885, + 0.868 + ], + "angle": 0, + "content": "Dan Hendrycks, Steven Basart, Saurav Kadavath, Mantas Mazeika, Akul Arora, Ethan Guo, Collin Burns, Samir Puranik, Horace He, Dawn Song, and Jacob Steinhardt. Measuring coding challenge competence with apps. NeurIPS, 2021a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021b." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "48" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.181 + ], + "angle": 0, + "content": "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. TaPas: Weakly supervised table parsing via pre-training. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4320-4333, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.398. URL https://aclanthology.org/2020.acl-main.398/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.187, + 0.885, + 0.222 + ], + "angle": 0, + "content": "Keith J Holyoak. Analogy and relational reasoning. The Oxford handbook of thinking and reasoning, pp. 234-259, 2012. URL https://psycnet.apa.org/record/2012-08871-013." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.227, + 0.885, + 0.274 + ], + "angle": 0, + "content": "Jiwoo Hong, Noah Lee, and James Thorne. 
Orpo: Monolithic preference optimization without reference model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 11170-11189, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.281, + 0.885, + 0.328 + ], + "angle": 0, + "content": "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.335, + 0.885, + 0.368 + ], + "angle": 0, + "content": "Xinyi Hou, Yanjie Zhao, Shenao Wang, and Haoyu Wang. Model context protocol (mcp): Landscape, security threats, and future research directions. arXiv preprint arXiv:2503.23278, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.374, + 0.885, + 0.42 + ], + "angle": 0, + "content": "Shengran Hu, Cong Lu, and Jeff Clune. Automated design of agentic systems. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=t9U3LW7JVX." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.428, + 0.885, + 0.461 + ], + "angle": 0, + "content": "Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. arXiv preprint arXiv:2406.18365, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.468, + 0.885, + 0.544 + ], + "angle": 0, + "content": "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.552, + 0.885, + 0.601 + ], + "angle": 0, + "content": "Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=IkmD3fKBPQ." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.607, + 0.885, + 0.683 + ], + "angle": 0, + "content": "Siming Huang, Tianhao Cheng, J. K. Liu, Jiaran Hao, Liuyihan Song, Yang Xu, J. Yang, J. H. Liu, Chenchen Zhang, Linzheng Chai, Ruifeng Yuan, Zhaoxiang Zhang, Jie Fu, Qian Liu, Ge Zhang, Zili Wang, Yuan Qi, Yinghui Xu, and Wei Chu. Opencoder: The open cookbook for top-tier code large language models. CoRR, abs/2411.04905, 2024b. doi: 10.48550/ARXIV.2411.04905. URL https://doi.org/10.48550/arXiv.2411.04905." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.691, + 0.885, + 0.739 + ], + "angle": 0, + "content": "Yuncheng Huang, Qianyu He, Yipei Xu, Jiaqing Liang, and Yanghua Xiao. Laying the foundation first? investigating the generalization from atomic skills to complex reasoning tasks, 2024c. URL https:// arxiv.org/abs/2403.09479." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.746, + 0.885, + 0.793 + ], + "angle": 0, + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.8, + 0.885, + 0.833 + ], + "angle": 0, + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.839, + 0.885, + 0.872 + ], + "angle": 0, + "content": "Michael Huth and Mark Ryan. Logic in computer science: Modelling and reasoning about systems. Cambridge university press., 86, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livocodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974, 2024." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "49" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Harold Jeffreys. An invariant form for the prior probability in estimation problems. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, 186:453-461, 1946. doi: 10.1098/rspa.1946.0056. URL http://doi.org/10.1098/rspa.1946.0056." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.159, + 0.885, + 0.207 + ], + "angle": 0, + "content": "Albert Q. Jiang, Wenda Li, and Mateja Jamnik. Multi-language diversity benefits autoformalization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=2jjfRm2R6D." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.215, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. StructGPT: A general framework for large language model to reason over structured data. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 
9237-9251, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.574. URL https://aclanthology.org/2023.emnlp-main.574/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.301, + 0.885, + 0.348 + ], + "angle": 0, + "content": "Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.357, + 0.885, + 0.433 + ], + "angle": 0, + "content": "Fangkai Jiao, Chengwei Qin, Zhengyuan Liu, Nancy Chen, and Shafiq Joty. Learning planning-based reasoning by trajectories collection and process reward synthesizing. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 334-350. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.20." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.443, + 0.885, + 0.52 + ], + "angle": 0, + "content": "Fangkai Jiao, Zhiyang Teng, Bosheng Ding, Zhengyuan Liu, Nancy F. Chen, and Shafiq Joty. Exploring self-supervised logic-enhanced training for large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 926-941. Association for Computational Linguistics, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.529, + 0.885, + 0.561 + ], + "angle": 0, + "content": "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F. Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. In ICLR. OpenReview.net, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.57, + 0.885, + 0.631 + ], + "angle": 0, + "content": "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=VTF8yNQM66." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.641, + 0.885, + 0.733 + ], + "angle": 0, + "content": "Jaehun Jung, Lianhui Qin, Sean Welleck, Faeze Brahman, Chandra Bhagavatula, Ronan Le Bras, and Yejin Choi. Maieutic prompting: Logically consistent reasoning with recursive explanations. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1266-1279, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.82. URL https://aclanthology.org/2022.emnlp-main.82/." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.742, + 0.885, + 0.774 + ], + "angle": 0, + "content": "Jaehun Jung, Faeze Brahman, and Yejin Choi. Trust or escalate: Llm judges with provable guarantees for human agreement. arXiv preprint arXiv:2407.18370, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.783, + 0.885, + 0.829 + ], + "angle": 0, + "content": "Katie Kang, Amrith Setlur, Dibya Ghosh, Jacob Steinhardt, Claire Tomlin, Sergey Levine, and Aviral Kumar. What do learning dynamics reveal about generalization in llm reasoning?, 2024. URL https://arxiv.org/abs/2411.07681." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.838, + 0.885, + 0.871 + ], + "angle": 0, + "content": "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "50" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Zixuan Ke and Bing Liu. Continual learning of natural language processing tasks: A survey, 2023. URL https://arxiv.org/abs/2211.12701." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.144, + 0.885, + 0.178 + ], + "angle": 0, + "content": "Zixuan Ke, Yijia Shao, Haowei Lin, Tatsuya Konishi, Gyuhak Kim, and Bing Liu. Continual pre-training of language models, 2023. URL https://arxiv.org/abs/2302.03241." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.186, + 0.885, + 0.22 + ], + "angle": 0, + "content": "Zixuan Ke, Weize Kong, Cheng Li, Mingyang Zhang, Qiaozhu Mei, and Michael Bendersky. Bridging the preference gap between retrievers and llms, 2024. URL https://arxiv.org/abs/2401.06954." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.228, + 0.885, + 0.321 + ], + "angle": 0, + "content": "Zixuan Ke, Yifei Ming, and Shafiq Joty. Adaptation of large language models. In Maria Lomeli, Swabha Swayamdipta, and Rui Zhang (eds.), Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 5: Tutorial Abstracts), pp. 30-37, Albuquerque, New Mexico, May 2025a. Association for Computational Linguistics. ISBN 979-8-89176-193-3. 
doi: 10.18653/v1/2025.naacl-tutorial.5. URL https://aclanthology.org/2025.naacl-tutorial.5/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.33, + 0.885, + 0.363 + ], + "angle": 0, + "content": "Zixuan Ke, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Demystifying domain-adaptive post-training for financial llms. arXiv preprint arXiv:2501.04961, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.371, + 0.885, + 0.405 + ], + "angle": 0, + "content": "Zixuan Ke, Austin Xu, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Mas-zero: Designing multi-agent systems with zero supervision, 2025c. URL https://arxiv.org/abs/2505.14996." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.413, + 0.885, + 0.461 + ], + "angle": 0, + "content": "Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.47, + 0.885, + 0.533 + ], + "angle": 0, + "content": "Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=nGgzQjzaRy." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.542, + 0.885, + 0.589 + ], + "angle": 0, + "content": "Dongkwan Kim, Junho Myung, and Alice Oh. Salad-bowl-LLM: Multi-culture LLMs by in-context demonstrations from diverse cultures. In Workshop on Socially Responsible Language Modelling Research, 2024a. URL https://openreview.net/forum?id=KsAfPGPZZn." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.599, + 0.885, + 0.646 + ], + "angle": 0, + "content": "Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangdoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, et al. Prometheus: Inducing fine-grained evaluation capability in language models. In The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.655, + 0.885, + 0.703 + ], + "angle": 0, + "content": "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The biggen bench: A principled benchmark for fine-grained evaluation of language models with language models. arXiv preprint arXiv:2406.05761, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.712, + 0.885, + 0.76 + ], + "angle": 0, + "content": "Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. arXiv preprint arXiv:2405.01535, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.768, + 0.885, + 0.816 + ], + "angle": 0, + "content": "Sunghwan Kim, Dongjin Kang, Taeyoon Kwon, Hyungjoo Chae, Jungsoo Won, Dongha Lee, and Jinyoung Yeo. Evaluating robustness of reward models for mathematical reasoning, 2024d. URL https://arxiv.org/abs/2410.01729." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.825, + 0.885, + 0.859 + ], + "angle": 0, + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35:22199-22213, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.867, + 0.885, + 0.884 + ], + "angle": 0, + "content": "Wouter Kool, Herke van Hoof, and Max Welling. 
Buy 4 reinforce samples, get a baseline for free! 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.893, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Michal Kosinski. Evaluating large language models in theory of mind tasks. Proceedings of the National Academy of Sciences, 121(45):e2405460121, 2024." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.961 + ], + "angle": 0, + "content": "51" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Julia Kreutzer, Artem Sokolov, and Stefan Riezler. Bandit structured prediction for neural sequence-to-sequence learning. arXiv preprint arXiv:1704.06497, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.144, + 0.885, + 0.222 + ], + "angle": 0, + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D. Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M. Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal M. P. Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. CoRR, abs/2409.12917, 2024. doi: 10.48550/ARXIV.2409.12917. URL https://doi.org/10.48550/arXiv.2409.12917." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.231, + 0.885, + 0.279 + ], + "angle": 0, + "content": "Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. CoRR, abs/2406.18629, 2024. doi: 10.48550/ ARXIV.2406.18629. URL https://doi.org/10.48550/arXiv.2406.18629." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.287, + 0.885, + 0.364 + ], + "angle": 0, + "content": "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. 
Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tülu 3: Pushing frontiers in open language model post-training. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.373, + 0.885, + 0.405 + ], + "angle": 0, + "content": "Qiangfeng Peter Lau, Mong-Li Lee, and Wynne Hsu. Coordination guided reinforcement learning. In AAMAS, pp. 215-222, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.414, + 0.885, + 0.461 + ], + "angle": 0, + "content": "Harrison Lee, Samrat Phatale, Hassan Mansoor, Kellie Ren Lu, Thomas Mesnard, Johan Ferret, Colton Bishop, Ethan Hall, Victor Carbune, and Abhinav Rastogi. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.47, + 0.885, + 0.517 + ], + "angle": 0, + "content": "Sangmin Lee, Minzhi Li, Bolin Lai, Wenqi Jia, Fiona Ryan, Xu Cao, Ozgur Kara, Bikram Boote, Weiyan Shi, Diyi Yang, et al. Towards social ai: A survey on understanding social interactions. arXiv preprint arXiv:2409.15316, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.526, + 0.885, + 0.604 + ], + "angle": 0, + "content": "Itay Levy, Ben Boin, and Jonathan Berant. Diverse demonstrations improve in-context compositional generalization. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1401-1422, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.78. URL https://aclanthology.org/2023.acl-long.78/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.613, + 0.885, + 0.66 + ], + "angle": 0, + "content": "Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. arXiv preprint arXiv:2312.04474, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.669, + 0.885, + 0.746 + ], + "angle": 0, + "content": "Haoran Li, Qingxiu Dong, Zhengyang Tang, Chaojun Wang, Xingxing Zhang, Haoyang Huang, Shaohan Huang, Xiaolong Huang, Zeqiang Huang, Dongdong Zhang, Yuxian Gu, Xin Cheng, Xun Wang, Si-Qing Chen, Li Dong, Wei Lu, Zhifang Sui, Benyou Wang, Wai Lam, and Furu Wei. Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064, 2024a. URL https://doi.org/10.48550/arXiv.2402.13064." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.755, + 0.885, + 0.788 + ], + "angle": 0, + "content": "Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, Hai Zhao, and Pengfei Liu. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.796, + 0.885, + 0.845 + ], + "angle": 0, + "content": "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=bgzUSZ8aeg." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.853, + 0.885, + 0.885 + ], + "angle": 0, + "content": "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.893, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Minzhi Li, Weiyan Shi, Caleb Ziems, and Diyi Yang. 
Social intelligence data infrastructure: Structuring the present and navigating the future. arXiv preprint arXiv:2403.14659, 2024d." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "52" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pp. 2277-2290, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.159, + 0.885, + 0.192 + ], + "angle": 0, + "content": "Mukai Li, Shansan Gong, Jiangtao Feng, Yiheng Xu, Jun Zhang, Zhiyong Wu, and Lingpeng Kong. Incontext learning with many demonstration examples. arXiv preprint arXiv:2302.04931, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.199, + 0.885, + 0.232 + ], + "angle": 0, + "content": "Ruosen Li, Teerth Patel, and Xinya Du. Prd: Peer rank and discussion improve large language model based evaluations. arXiv preprint arXiv:2307.02762, 2023d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.24, + 0.885, + 0.272 + ], + "angle": 0, + "content": "Sheng Li, Jayesh K Gupta, Peter Morales, Ross Allen, and Mykel J Kochenderfer. Deep implicit coordination graphs for multi-agent reinforcement learning. arXiv preprint arXiv:2006.11438, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.28, + 0.885, + 0.371 + ], + "angle": 0, + "content": "Xiaonan Li, Kai Lv, Hang Yan, Tianyang Lin, Wei Zhu, Yuan Ni, Guotong Xie, Xiaoling Wang, and Xipeng Qiu. Unified demonstration retriever for in-context learning. 
In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4644-4668, Toronto, Canada, July 2023e. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.256. URL https://aclanthology.org/2023.acl-long.256/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.38, + 0.885, + 0.428 + ], + "angle": 0, + "content": "Xingxuan Li, Ruochen Zhao, Yew Ken Chia, Bosheng Ding, Shafiq Joty, Soujanya Poria, and Lidong Bing. Chain-of-knowledge: Grounding large language models via dynamic knowledge adapting over heterogeneous sources, 2024e. URL https://arxiv.org/abs/2305.13269." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.436, + 0.885, + 0.484 + ], + "angle": 0, + "content": "Yang Li, Wenhao Zhang, Jianhong Wang, Shao Zhang, Yali Du, Ying Wen, and Wei Pan. Aligning individual and collective objectives in multi-agent cooperation. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024f. URL https://openreview.net/forum?id=2YSHEBRRol." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.491, + 0.885, + 0.598 + ], + "angle": 0, + "content": "Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel J. Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. Science, 378(6624):1092-1097, December 2022. ISSN 1095-9203. doi: 10.1126/science.abq1158. URL http://dx.doi.org/10.1126/science.abq1158." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.607, + 0.885, + 0.669 + ], + "angle": 0, + "content": "Zenan Li, Zhaoyu Li, Wen Tang, Xian Zhang, Yuan Yao, Xujie Si, Fan Yang, Kaiyu Yang, and Xiaoxing Ma. Proving olympiad inequalities by synergizing LLMs and symbolic reasoning. In The Thirteenth International Conference on Learning Representations, 2025b. URL https://openreview.net/forum?id=FiyS0ecSm0." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.678, + 0.885, + 0.755 + ], + "angle": 0, + "content": "Zhaoyi Li, Gangwei Jiang, Hong Xie, Linqi Song, Defu Lian, and Ying Wei. Understanding and patching compositional reasoning in LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 9668-9688, Bangkok, Thailand, August 2024g. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.576. URL https://aclanthology.org/2024-findings-acl.576/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.763, + 0.885, + 0.81 + ], + "angle": 0, + "content": "Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, 2024h. URL https://openreview.net/forum?id=3EWTEy9MTM." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.818, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. 
Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang," + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "53" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. Holistic evaluation of language models. Transactions on Machine Learning Research, 2023a. ISSN 2835-8856. URL https://openreview.net/forum?id=i04LZibEqW. Featured Certification, Expert Certification." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.159, + 0.885, + 0.207 + ], + "angle": 0, + "content": "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multi-agent debate. arXiv preprint arXiv:2305.19118, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.215, + 0.885, + 0.261 + ], + "angle": 0, + "content": "Yancheng Liang, Daphne Chen, Abhishek Gupta, Simon Shaolei Du, and Natasha Jaques. Learning to cooperate with humans using generative agents. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=v4dXL3LsGX." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.27, + 0.885, + 0.332 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. 
URL https://openreview.net/forum?id=v8LOpN6EOi." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.341, + 0.885, + 0.433 + ], + "angle": 0, + "content": "Bill Yuchen Lin, Seyeon Lee, Xiaoyang Qiao, and Xiang Ren. Common sense beyond English: Evaluating and improving multilingual language models for commonsense reasoning. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 1274-1287, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.102. URL https://aclanthology.org/2021.acl-long.102/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.441, + 0.885, + 0.518 + ], + "angle": 0, + "content": "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. An inner table retriever for robust table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9909–9926, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.551. URL https://aclanthology.org/2023.acl-long.551/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.527, + 0.885, + 0.619 + ], + "angle": 0, + "content": "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. LI-RAGE: Late interaction retrieval augmented generation with explicit signals for open-domain table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 1557-1566, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-short.133. 
URL https://aclanthology.org/2023.acl-short.133/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.627, + 0.885, + 0.672 + ], + "angle": 0, + "content": "Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.682, + 0.882, + 0.715 + ], + "angle": 0, + "content": "Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.723, + 0.885, + 0.814 + ], + "angle": 0, + "content": "Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. What makes good in-context examples for GPT-3? In Eneko Agirre, Marianna Apidianaki, and Ivan Vulić (eds.), Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pp. 100–114, Dublin, Ireland and Online, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.deelio-1.10. URL https://aclanthology.org/2022.deelio-1.10/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.823, + 0.885, + 0.871 + ], + "angle": 0, + "content": "Liang Liu, Dong Zhang, Shoushan Li, Guodong Zhou, and Erik Cambria. Two heads are better than one: Zero-shot cognitive reasoning via multi-llm knowledge fusion. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, pp. 1462–1472, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.879, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Ryan Liu, Jiayi Geng, Addison J. Wu, Ilia Sucholutsky, Tania Lombrozo, and Thomas L. Griffiths. 
Mind your step (by step): Chain-of-thought can reduce performance on tasks where thinking makes humans worse, 2024c. URL https://arxiv.org/abs/2410.21333." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "54" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.196 + ], + "angle": 0, + "content": "Tianyang Liu, Fei Wang, and Muhao Chen. Rethinking tabular data understanding with large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 450-482, Mexico City, Mexico, June 2024d. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.26. URL https://aclanthology.org/2024.naacl-long.26/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.205, + 0.885, + 0.254 + ], + "angle": 0, + "content": "Tongxuan Liu, Xingyu Wang, Weizhe Huang, Wenjiang Xu, Yuting Zeng, Lei Jiang, Hailong Yang, and Jing Li. Groupdebate: Enhancing the efficiency of multi-agent debate using group discussion. arXiv preprint arXiv:2409.14051, 2024e." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.263, + 0.885, + 0.311 + ], + "angle": 0, + "content": "Yanchen Liu, Srishti Gautam, Jiaqi Ma, and Himabindu Lakkaraju. Investigating the fairness of large language models for predictions on tabular data. In *Socially Responsible Language Modelling Research*, 2023. URL https://openreview.net/forum?id=V1740FqidS." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.319, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Yantao Liu, Zijun Yao, Rui Min, Yixin Cao, Lei Hou, and Juanzi Li. Pairwise rm: Perform best-of-n sampling with knockout tournament. 
arXiv preprint arXiv:2501.13007, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.362, + 0.885, + 0.424 + ], + "angle": 0, + "content": "Zhihan Liu, Hao Hu, Shenao Zhang, Hongyi Guo, Shuqi Ke, Boyi Liu, and Zhaoran Wang. Reason for future, act for now: A principled architecture for autonomous LLM agents. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 31186-31261. PMLR, 21-27 Jul 2024f." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.433, + 0.885, + 0.468 + ], + "angle": 0, + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be a hah moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025c. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.476, + 0.885, + 0.524 + ], + "angle": 0, + "content": "Do Xuan Long, Hai Nguyen Ngoc, Tiviatis Sim, Hieu Dao, Shafiq Joty, Kenji Kawaguchi, Nancy F Chen, and Min-Yen Kan. Llms are biased towards output formats! systematically evaluating and mitigating output format bias of llms. arXiv preprint arXiv:2408.08656, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.532, + 0.885, + 0.625 + ], + "angle": 0, + "content": "Do Xuan Long, Duong Ngoc Yen, Anh Tuan Luu, Kenji Kawaguchi, Min-Yen Kan, and Nancy F. Chen. Multi-expert prompting improves reliability, safety and usefulness of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 20370-20401, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.1135. URL https://aclanthology.org/2024.emnlp-main.1135/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.635, + 0.885, + 0.728 + ], + "angle": 0, + "content": "Do Xuan Long, Yiran Zhao, Hannah Brown, Yuxi Xie, James Zhao, Nancy Chen, Kenji Kawaguchi, Michael Shieh, and Junxian He. Prompt optimization via adversarial in-context learning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7308-7327, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.395. URL https://aclanthology.org/2024.acl-long.395/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.737, + 0.885, + 0.786 + ], + "angle": 0, + "content": "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.794, + 0.885, + 0.829 + ], + "angle": 0, + "content": "Weizheng Lu, Jing Zhang, Ju Fan, Zihao Fu, Yueguo Chen, and Xiaoyong Du. Large language model for table processing: A survey. Frontiers of Computer Science, 19(2):192350, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.836, + 0.885, + 0.871 + ], + "angle": 0, + "content": "Xinyuan Lu, Liangming Pan, Yubo Ma, Preslav Nakov, and Min-Yen Kan. Tart: An open-source tool-augmented framework for explainable table-based reasoning. arXiv preprint arXiv:2409.11724, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023a." 
+ }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "55" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.165 + ], + "angle": 0, + "content": "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. CoRR, abs/2308.09583, 2023b. doi: 10.48550/ARXIV.2308.09583. URL https://doi.org/10.48550/arXiv.2308.09583." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.177, + 0.885, + 0.223 + ], + "angle": 0, + "content": "Kangyang Luo, Zichen Ding, Zhenmin Weng, Lingfeng Qiao, Meng Zhao, Xiang Li, Di Yin, and Jinlong Shu. Let's be self-generated via step by step: A curriculum learning approach to automated reasoning with large language models. arXiv preprint arXiv:2410.21728, 2024a. URL https://arxiv.org/abs/2410.21728." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.234, + 0.885, + 0.295 + ], + "angle": 0, + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. CoRR, abs/2406.06592, 2024b. doi: 10.48550/ARXIV.2406.06592. URL https://doi.org/10.48550/arXiv.2406.06592." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.307, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024c." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.365, + 0.885, + 0.411 + ], + "angle": 0, + "content": "Man Luo, Xin Xu, Zhuyun Dai, Panupong Pasupat, Mehran Kazemi, Chitta Baral, Vaiva Imbrasaite, and Vincent Y Zhao. Dr. icl: Demonstration-retrieved in-context learning. arXiv preprint arXiv:2305.14128, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.423, + 0.885, + 0.469 + ], + "angle": 0, + "content": "Man Luo, Xin Xu, Yue Liu, Panupong Pasupat, and Mehran Kazemi. In-context learning with retrieved demonstrations for language models: A survey. Transactions on Machine Learning Research, 2024d. ISSN 2835-8856. URL https://openreview.net/forum?id=NQPo8ZhQPa. Survey Certification." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.481, + 0.885, + 0.588 + ], + "angle": 0, + "content": "Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi (eds.), Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.599, + 0.885, + 0.689 + ], + "angle": 0, + "content": "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, and Aixin Sun. Sciagent: Tool-augmented language models for scientific reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 15701-15736. 
Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.880." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.702, + 0.885, + 0.749 + ], + "angle": 0, + "content": "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, Aixin Sun, Hany Awadalla, et al. Sciagent: Tool-augmented language models for scientific reasoning. arXiv preprint arXiv:2402.11451, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.761, + 0.885, + 0.837 + ], + "angle": 0, + "content": "Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1448-1535, Singapore, December 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.101. URL https://aclanthology.org/2023-findings-emnlp.101/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.849, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Thirty-seventh Conference on Neural Information Processing Systems, 2023b. URL https://openreview.net/forum?id=S37h0erQLB." 
+ }, + { + "type": "list", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "56" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.158, + 0.885, + 0.204 + ], + "angle": 0, + "content": "Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.214, + 0.885, + 0.246 + ], + "angle": 0, + "content": "XTX Markets. AIMO Progress Prize: July 2024 results. https://aimoprize.com/updates/2024-07-20-progress-prize-results, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.254, + 0.885, + 0.299 + ], + "angle": 0, + "content": "Tula Masterman, Sandi Besen, Mason Sawtell, and Alex Chao. The landscape of emerging ai agent architectures for reasoning, planning, and tool calling: A survey, 2024. URL https://arxiv.org/abs/2404.11584." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.308, + 0.885, + 0.355 + ], + "angle": 0, + "content": "Marco Matta, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, M Re, F Silvestri, and S Spanò. Q-rts: a real-time swarm intelligence based on multi-agent q-learning. _Electronics Letters_, 55(10):589–591, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.363, + 0.885, + 0.395 + ], + "angle": 0, + "content": "Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.403, + 0.885, + 0.496 + ], + "angle": 0, + "content": "Raja Sekhar Reddy Mekala, Yasaman Razeghi, and Sameer Singh. EchoPrompt: Instructing the model to rephrase queries for improved in-context learning. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 399-432, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-short.35. URL https://aclanthology.org/2024.naacl-short.35." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.504, + 0.885, + 0.536 + ], + "angle": 0, + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. Simpo: Simple preference optimization with a reference-free reward. arXiv preprint arXiv:2405.14734, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.543, + 0.885, + 0.591 + ], + "angle": 0, + "content": "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, 2024. URL https://openreview.net/forum?id=NjNGLPh8Wh." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.599, + 0.885, + 0.646 + ], + "angle": 0, + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.653, + 0.885, + 0.715 + ], + "angle": 0, + "content": "Yifei Ming, Senthil Purushwalkam, Shrey Pandit, Zixuan Ke, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Faitheval: Can your language model stay faithful to context, even if \"the moon is made of marshmallows\". In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=UeVx6L59fg." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.723, + 0.885, + 0.786 + ], + "angle": 0, + "content": "Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AjXkRZIvjb." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.794, + 0.885, + 0.871 + ], + "angle": 0, + "content": "Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3470-3487, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.244. URL https://aclanthology.org/2022.acl-long.244/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models - a survey. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Lmjgl2n11u." 
+ }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "57" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Nieves Montes, Michael Luck, Nardine Osman, Odinaldo Rodrigues, and Carles Sierra. Combining theory of mind and abductive reasoning in agent-oriented programming. Autonomous Agents and Multi-Agent Systems, 37(2):36, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.157, + 0.885, + 0.192 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.197, + 0.885, + 0.275 + ], + "angle": 0, + "content": "Md Mahadi Hasan Nahid and Davood Rafiei. NormTab: Improving symbolic reasoning in LLMs through tabular data normalization. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 3569-3585, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.203. URL https://aclanthology.org/2024 findings-emnlp.203/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.282, + 0.885, + 0.316 + ], + "angle": 0, + "content": "Allen Newell, John C Shaw, and Herbert A Simon. Report on a general problem solving program. In IFIP congress, volume 256, pp. 64. Pittsburgh, PA, 1959." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.322, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Allen Newell, Herbert Alexander Simon, et al. Human problem solving, volume 104. Prentice-hall Englewood Cliffs, NJ, 1972." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.361, + 0.885, + 0.395 + ], + "angle": 0, + "content": "Khanh Nguyen, Hal Daumé III, and Jordan Boyd-Graber. Reinforcement learning for bandit neural machine translation with simulated human feedback. arXiv preprint arXiv:1707.07402, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.4, + 0.885, + 0.448 + ], + "angle": 0, + "content": "Ansong Ni, Miltiadis Allamanis, Arman Cohan, Yinlin Deng, Kensen Shi, Charles Sutton, and Pengcheng Yin. Next: Teaching large language models to reason about code execution. In ICML, 2024. URL https://openreview.net/forum?id=B1W712hMBi." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.455, + 0.885, + 0.488 + ], + "angle": 0, + "content": "Tobias Nipkow, Markus Wenzel, and Lawrence C Paulson. Isabelle/HOL: a proof assistant for higher-order logic. 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.495, + 0.885, + 0.527 + ], + "angle": 0, + "content": "NovaSky Team. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025. Accessed: 2025-01-09." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.533, + 0.885, + 0.597 + ], + "angle": 0, + "content": "Maxwell Nye, Anders Andreassen, Guy Gur-Ari, Henryk Witold Michalewski, Jacob Austin, David Bieber, David Martin Dohan, Aitor Lewkowycz, Maarten Paul Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2021. https://arxiv.org/abs/2112.00114." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.603, + 0.885, + 0.651 + ], + "angle": 0, + "content": "Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma, Tom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al. In-context learning and induction heads. arXiv preprint arXiv:2209.11895, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.658, + 0.724, + 0.676 + ], + "angle": 0, + "content": "OpenAI. Introducing gpt-4.5. https://openai.com/index/introducing-gpt-4-5/, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.682, + 0.885, + 0.926 + ], + "angle": 0, + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": 
null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "58" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.13, + 0.103, + 0.885, + 0.452 + ], + "angle": 0, + "content": "Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quñonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. 
Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan, Thibault Sottiaux, Thomas Degry, Thomas Dimson, Tianhao Zheng, Timur Garipov, Tom Stasi, Trapit Bansal, Trevor Creech, Troy Peterson, Tyna Eloundou, Valerie Qi, Vineet Kosaraju, Vinnie Monaco, Vitchyr Pong, Vlad Fomenko, Weiyi Zheng, Wenda Zhou, Wes McCabe, Wojciech Zaremba, Yann Dubois, Yinghai Lu, Yining Chen, Young Cha, Yu Bai, Yuchen He, Yuchen Zhang, Yunyun Wang, Zheng Shao and Zhuohan Li. Openai o1 system card 2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.461, + 0.885, + 0.539 + ], + "angle": 0, + "content": "OpenAI, :, Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, Jerry Tworek, Lorenz Kuhn, Lukasz Kaiser, Mark Chen, Max Schwarzer, Mostafa Rohaninejad, Nat McAleese, o3 contributors, Oleg Mürk, Rhythm Garg, Rui Shu, Szymon Sidor, Vineet Kosaraju, and Wenda Zhou. Competitive programming with large reasoning models, 2025. 
URL https://arxiv.org/abs/2502.06807." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.548, + 0.885, + 0.623 + ], + "angle": 0, + "content": "Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022. URL https://arxiv.org/abs/2203.02155." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.635, + 0.885, + 0.682 + ], + "angle": 0, + "content": "Bo Pan, Jiaying Lu, Ke Wang, Li Zheng, Zhen Wen, Yingchaojie Feng, Minfeng Zhu, and Wei Chen. Agent-coord: Visually exploring coordination strategy for llm-based multi-agent collaboration. arXiv preprint arXiv:2404.11943, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.69, + 0.885, + 0.768 + ], + "angle": 0, + "content": "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.248. URL https://aclanthology.org/2023-findings-emnlp.248/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.777, + 0.885, + 0.839 + ], + "angle": 0, + "content": "Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse automated correction strategies. Transactions of the Association for Computational Linguistics, 12:484-506, 2024b. doi: 10.1162/tacl_a_00660. URL https://aclanthology.org/2024.tacl-1.27/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.849, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Bhargavi Paranjape, Julian Michael, Marjan Ghazvininejad, Hannaneh Hajishirzi, and Luke Zettlemoyer. Prompting contrastive explanations for commonsense reasoning tasks. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pp. 4179-4192, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021-findings-acl.366. URL https://aclanthology.org/2021-findings-acl.366/." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.461, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "59" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "Remo Pareschi. Abductive reasoning with the gpt-4 language model: Case studies from criminal investigation, medical practice, scientific research. _Sistema intelligenti_, 35(2):435-444, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.146, + 0.509, + 0.163 + ], + "angle": 0, + "content": "John Arthur Passmore. Philosophical reasoning. 1961." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.173, + 0.885, + 0.22 + ], + "angle": 0, + "content": "Pouya Pezeshkpour, Eser Kandogan, Nikita Bhutani, Sajjadur Rahman, Tom Mitchell, and Estevam Hruschka. Reasoning capacity in multi-agent systems: Limitations, challenges and human-centered solutions, 2024. URL https://arxiv.org/abs/2402.01108." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.23, + 0.885, + 0.277 + ], + "angle": 0, + "content": "Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrs2T16." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.287, + 0.885, + 0.334 + ], + "angle": 0, + "content": "Mohammadreza Pourreza and Davood Rafiei. DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=p53QDxSIc5." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.345, + 0.885, + 0.436 + ], + "angle": 0, + "content": "Ben Prystawski, Michael Li, and Noah D. Goodman. Why think step by step? reasoning emerges from the locality of experience. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/e0af79ad53a336b4c4b4f7e2a68eb609-Abstract-Conference.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.447, + 0.885, + 0.525 + ], + "angle": 0, + "content": "Reid Pryzant, Dan Iter, Jerry Li, Yin Lee, Chenguang Zhu, and Michael Zeng. Automatic prompt optimization with \"gradient descent\" and beam search. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 7957-7968, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.494. URL https://aclanthology.org/2023.emnlp-main.494/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.535, + 0.885, + 0.582 + ], + "angle": 0, + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.592, + 0.885, + 0.639 + ], + "angle": 0, + "content": "Zhenting Qi, Hongyin Luo, Xuliang Huang, Zhuokai Zhao, Yibo Jiang, Xiangjun Fan, Himabindu Lakkaraju, and James Glass. Quantifying generalization complexity for large language models, 2024. URL https://arxiv.org/abs/2410.01769." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.649, + 0.885, + 0.682 + ], + "angle": 0, + "content": "Shuofei Qiao, Honghao Gui, Chengfei Lv, Qianghuai Jia, Huajun Chen, and Ningyu Zhang. Making language models better tool learners with execution feedback. arXiv preprint arXiv:2305.13068, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.691, + 0.885, + 0.754 + ], + "angle": 0, + "content": "Shuofei Qiao, Yixin Ou, Ningyu Zhang, Xiang Chen, Yunzhi Yao, Shumin Deng, Chuanqi Tan, Fei Huang, and Huajun Chen. Reasoning with language model prompting: A survey. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5368-5393, Toronto, Canada, July 2023b. URL https://aclanthology.org/2023.acl-long.294/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.763, + 0.885, + 0.811 + ], + "angle": 0, + "content": "Chengwei Qin, Wenhan Xia, Tan Wang, Fangkai Jiao, Yuchen Hu, Bosheng Ding, Ruirui Chen, and Shafiq Joty. Relevant or random: Can llms truly perform analogical reasoning? ACL-Findings, 2025. URL https://arxiv.org/abs/2404.12728." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.821, + 0.885, + 0.868 + ], + "angle": 0, + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.878, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Xihe Qiu, Haoyu Wang, Xiaoyu Tan, Chao Qu, Yujie Xiong, Yuan Cheng, Yinghui Xu, Wei Chu, and Yuan Qi. Towards collaborative intelligence: Propagating intentions and reasoning for multi-agent coordination with large language models, 2024. URL https://arxiv.org/abs/2407.12532." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "60" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=DRC9pZwBwR." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.158, + 0.885, + 0.192 + ], + "angle": 0, + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. arXiv preprint arXiv:2407.18219, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.198, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D. Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-AAbstract-Conference.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.298, + 0.885, + 0.332 + ], + "angle": 0, + "content": "Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. Explain yourself! leveraging language models for commonsense reasoning. arXiv preprint arXiv:1906.02361, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.338, + 0.885, + 0.386 + ], + "angle": 0, + "content": "Shyam Sundhar Ramesh, Yifan Hu, Iason Chaimalas, Viraj Mehta, Pier Giuseppe Sessa, Haitham Bou Ammar, and Ilija Bogunovic. Group robust preference optimization in reward-free rlhf. arXiv preprint arXiv:2405.20304, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.393, + 0.885, + 0.441 + ], + "angle": 0, + "content": "Jingqing Ruan, Yali Du, Xuantang Xiong, Dengpeng Xing, Xiyun Li, Linghui Meng, Haifeng Zhang, Jun Wang, and Bo Xu. Gcs: Graph-based coordination strategy for multi-agent reinforcement learning. arXiv preprint arXiv:2201.06257, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.448, + 0.885, + 0.541 + ], + "angle": 0, + "content": "Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. In Marine Carpuat, Marie-Catherine de Marneffé, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655-2671, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.191. URL https://aclanthology.org/2022.naacl-main.191/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.548, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Stuart Russell and Peter Norvig. Artificial Intelligence: A Modern Approach. Prentice Hall, 3 edition, 2010." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.574, + 0.885, + 0.621 + ], + "angle": 0, + "content": "Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.628, + 0.885, + 0.661 + ], + "angle": 0, + "content": "Amir Saeidi, Shivanshu Verma, Aswin RRV, and Chitta Baral. Triple preference optimization: Achieving better alignment with less data in a single step optimization. arXiv preprint arXiv:2405.16681, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.668, + 0.885, + 0.792 + ], + "angle": 0, + "content": "Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M Rush. Multitask prompted training enables zero-shot task generalization. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=9Vrb9D0WI4." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.798, + 0.885, + 0.847 + ], + "angle": 0, + "content": "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.853, + 0.885, + 0.887 + ], + "angle": 0, + "content": "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.893, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Erik Schluntz and Barry Zhang. Building effective agents. https://www.anthropic.com/, Dec 2024. URL https://www.anthropic.com/research/building-effective-agents." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "61" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.137 + ], + "angle": 0, + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.144, + 0.885, + 0.207 + ], + "angle": 0, + "content": "Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. Quantifying language models' sensitivity to spurious features in prompt design or: How i learned to start worrying about prompt formatting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RIu51yNXjT." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.215, + 0.885, + 0.294 + ], + "angle": 0, + "content": "S Seals and Valerie Shalin. Evaluating the deductive competence of large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8614-8630, Mexico City, Mexico, June 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.476. URL https://aclanthology.org/2024.naacl-long.476/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.302, + 0.885, + 0.334 + ], + "angle": 0, + "content": "H Seo and D Lee. Reinforcement learning and strategic reasoning during social decision-making. In Decision Neuroscience, pp. 225-231. Elsevier, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.343, + 0.885, + 0.42 + ], + "angle": 0, + "content": "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shahriari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. BOND: aligning llms with best-of-n distillation. CoRR, abs/2407.14622, 2024. URL https://doi.org/10.48550/arXiv.2407.14622." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.429, + 0.885, + 0.476 + ], + "angle": 0, + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning. arXiv preprint arXiv:2410.08146, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.485, + 0.885, + 0.547 + ], + "angle": 0, + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. CoRR, abs/2410.08146, 2024b. doi: 10.48550/ARXIV.2410.08146. URL https://doi.org/10.48550/arXiv.2410.08146." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.557, + 0.885, + 0.589 + ], + "angle": 0, + "content": "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role play with large language models. Nature, 623 (7987):493-498, 2023a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.597, + 0.885, + 0.63 + ], + "angle": 0, + "content": "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role-play with large language models, 2023b. URL https://arxiv.org/abs/2305.16367." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.639, + 0.885, + 0.686 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.695, + 0.885, + 0.742 + ], + "angle": 0, + "content": "Zhengliang Shi, Weiwei Sun, Shen Gao, Pengjie Ren, Zhumin Chen, and Zhaochun Ren. Generate-then-ground in retrieval-augmented generation for multi-hop question answering. arXiv preprint arXiv:2406.14891, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.751, + 0.885, + 0.797 + ], + "angle": 0, + "content": "Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.806, + 0.885, + 0.87 + ], + "angle": 0, + "content": "Kumar Shridhar, Koustuv Sinha, Andrew Cohen, Tianlu Wang, Ping Yu, Ramakanth Pasunuru, Mrinmaya Sachan, Jason Weston, and Asli Celikyilmaz. The art of llm refinement: Ask, refine, and trust. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5872-5883, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Chenglei Si, Zhe Gan, Zhengyuan Yang, Shuohang Wang, Jianfeng Wang, Jordan Lee Boyd-Graber, and Lijuan Wang. Prompting GPT-3 to be reliable. 
In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=98p5x51L5af." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "62" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling LLM test-time compute optimally can be more effective than scaling model parameters. CoRR, abs/2408.03314, 2024. doi: 10.48550/ARXIV.2408.03314. URL https://doi.org/10.48550/arXiv.2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.161, + 0.885, + 0.208 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling test-time compute optimally can be more effective than scaling LLM parameters. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=4FWAwZtd2n." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.218, + 0.885, + 0.267 + ], + "angle": 0, + "content": "Yifan Song, Weimin Xiong, Xiutian Zhao, Dawei Zhu, Wenhao Wu, Ke Wang, Cheng Li, Wei Peng, and Sujian Li. Agentbank: Towards generalized llm agents via fine-tuning on \\(50000+\\) interaction trajectories. arXiv preprint arXiv:2410.07706, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.276, + 0.885, + 0.338 + ], + "angle": 0, + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024a. URL https://arxiv.org/pdf/2409.12183." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.349, + 0.885, + 0.397 + ], + "angle": 0, + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning, 2024b. URL https://arxiv.org/abs/2409.12183." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.406, + 0.885, + 0.439 + ], + "angle": 0, + "content": "Keith E Stanovich and Richard F West. Individual differences in reasoning: Implications for the rationality debate? Behavioral and Brain Sciences, 23(5):645-665, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.449, + 0.885, + 0.482 + ], + "angle": 0, + "content": "Kaya Stechly, Matthew Marquez, and Subbarao Kambhampati. Gpt-4 doesn't know it's wrong: An analysis of iterative prompting for reasoning problems. arXiv preprint arXiv:2310.12397, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.491, + 0.885, + 0.524 + ], + "angle": 0, + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. arXiv preprint arXiv:2402.08115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.533, + 0.885, + 0.595 + ], + "angle": 0, + "content": "Nisan Stiannon, Long Ouyang, Jeff Wu, Daniel M. Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul Christiano. Learning to summarize from human feedback. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20, Red Hook, NY, USA, 2020. Curran Associates Inc. ISBN 9781713829546." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.606, + 0.885, + 0.639 + ], + "angle": 0, + "content": "Benedikt Stroebl, Sayash Kapoor, and Arvind Narayanan. Inference Scaling fLaws: The Limits of LLM Resampling with Imperfect Verifiers. 
arXiv preprint arXiv:2411.17501, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.649, + 0.885, + 0.694 + ], + "angle": 0, + "content": "Vighnesh Subramaniam, Yilun Du, Joshua B Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains. arXiv preprint arXiv:2501.05707, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.706, + 0.885, + 0.783 + ], + "angle": 0, + "content": "Yuan Sui, Mengyu Zhou, Mingjie Zhou, Shi Han, and Dongmei Zhang. Table meets llm: Can large language models understand structured table data? a benchmark and empirical study. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, WSDM '24, pp. 645-654, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 9798400703713. doi: 10.1145/3616855.3635752. URL https://doi.org/10.1145/3616855.3635752." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.793, + 0.885, + 0.826 + ], + "angle": 0, + "content": "Sainbayar Sukhbaatar, Rob Fergus, et al. Learning multiagent communication with backpropagation. Advances in neural information processing systems, 29, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.836, + 0.885, + 0.869 + ], + "angle": 0, + "content": "Theodore R. Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas L. Griffiths. Cognitive architectures for language agents, 2024. URL https://arxiv.org/abs/2309.02427." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. arXiv preprint arXiv:2307.07697, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "63" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.196 + ], + "angle": 0, + "content": "Jiaxing Sun, Weiquan Huang, Jiang Wu, Chenya Gu, Wei Li, Songyang Zhang, Hang Yan, and Conghui He. Benchmarking Chinese commonsense reasoning of LLMs: From Chinese-specifics to reasoning-memorization correlations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 11205-11228, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.604. URL https://aclanthology.org/2024.acl-long.604/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.205, + 0.885, + 0.237 + ], + "angle": 0, + "content": "Shichao Sun, Junlong Li, Weizhe Yuan, Ruifeng Yuan, Wenjie Li, and Pengfei Liu. The critique of critique. arXiv preprint arXiv:2401.04518, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.245, + 0.885, + 0.293 + ], + "angle": 0, + "content": "Zhiqing Sun, Longhui Yu, Yikang Shen, Weiyang Liu, Yiming Yang, Sean Welleck, and Chuang Gan. Easy-to-hard generalization: Scalable alignment beyond human supervision. CoRR, abs/2403.09472, 2024c. doi: 10.48550/ARXIV.2403.09472. URL https://doi.org/10.48550/arXiv.2403.09472." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.301, + 0.73, + 0.318 + ], + "angle": 0, + "content": "Richard S Sutton. Reinforcement learning: An introduction. A Bradford Book, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.327, + 0.885, + 0.358 + ], + "angle": 0, + "content": "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding. 
arXiv preprint arXiv:2401.12954, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.368, + 0.885, + 0.399 + ], + "angle": 0, + "content": "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding, 2024b. URL https://arxiv.org/abs/2401.12954." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.408, + 0.885, + 0.455 + ], + "angle": 0, + "content": "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.464, + 0.885, + 0.526 + ], + "angle": 0, + "content": "Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. Mathscale: Scaling instruction tuning for mathematical reasoning. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Kjww7ZN47M." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.535, + 0.885, + 0.582 + ], + "angle": 0, + "content": "Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, and Junyang Lin. Enabling scalable oversight via self-evolving critic, 2025. URL https://arxiv.org/abs/2501.05727." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.591, + 0.885, + 0.637 + ], + "angle": 0, + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.646, + 0.885, + 0.845 + ], + "angle": 0, + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, Chuning Tang, Congcong Wang, Dehao Zhang, Enming Yuan, Enzhe Lu, Fengxiang Tang, Flood Sung, Guangda Wei, Guokun Lai, Haiqing Guo, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haotian Yao, Haotian Zhao, Haoyu Lu, Haoze Li, Haozhen Yu, Hongcheng Gao, Huabin Zheng, Huan Yuan, Jia Chen, Jianhang Guo, Jianlin Su, Jianzhou Wang, Jie Zhao, Jin Zhang, Jingyuan Liu, Junjie Yan, Junyan Wu, Lidong Shi, Ling Ye, Longhui Yu, Mengnan Dong, Neo Zhang, Ningchen Ma, Qiwei Pan, Qucheng Gong, Shaowei Liu, Shengling Ma, Shupeng Wei, Sihan Cao, Siying Huang, Tao Jiang, Weihao Gao, Weimin Xiong, Weiran He, Weixiao Huang, Wenhao Wu, Wenyang He, Xianghui Wei, Xianqing Jia, Xingzhe Wu, Xinran Xu, Xinxing Zu, Xinyu Zhou, Xuehai Pan, Y. Charles, Yang Li, Yangyang Hu, Yangyang Liu, Yanru Chen, Yejie Wang, Yibo Liu, Yidao Qin, Yifeng Liu, Ying Yang, Yiping Bao, Yulun Du, Yuxin Wu, Yuzhi Wang, Zaida Zhou, Zhaoji Wang, Zhaowei Li, Zhen Zhu, Zheng Zhang, Zhexu Wang, Zhilin Yang, Zhiqi Huang, Ziyao Xu, and Zonghan Yang. Kimi k1.5: Scaling reinforcement learning with llms, 2025. URL https://arxiv.org/abs/2501.12599." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.853, + 0.885, + 0.885 + ], + "angle": 0, + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.894, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Amitayush Thakur, George Tsoukalas, Yeming Wen, Jimmy Xin, and Swarat Chaudhuri. An in-context learning agent for formal theorem-proving. In Conference on Language Modeling (COLM), 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "64" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.882, + 0.134 + ], + "angle": 0, + "content": "The Coq Development Team. The Coq Proof Assistant. 2024. URL https://coq.inria.fr/doc/V8.20.0/refman/index.html. Version 8.20.0." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.142, + 0.884, + 0.221 + ], + "angle": 0, + "content": "Qingyuan Tian, Hanlun Zhu, Lei Wang, Yang Li, and Yunshi Lan. \\(\\mathbf{R}^3\\) prompting: Review, rephrase and resolve for chain-of-thought reasoning in large language models under noisy context. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1670-1685, Singapore, December 2023. Association for Computational Linguistics. doi: 10. 18653/v1/2023-findings-emnlp.114. URL https://aclanthology.org/2023-findings-emnlp.114/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.227, + 0.882, + 0.259 + ], + "angle": 0, + "content": "Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. arXiv preprint arXiv:2404.12253, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.267, + 0.882, + 0.313 + ], + "angle": 0, + "content": "Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. CoRR, abs/2407.13690, 2024. doi: 10.48550/ARXIV.2407.13690. URL https://doi.org/10.48550/arXiv.2407.13690." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.321, + 0.882, + 0.368 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.375, + 0.882, + 0.42 + ], + "angle": 0, + "content": "Vince Trencsenyi, Agnieszka Mensfelt, and Kostas Stathis. Approximating human strategic reasoning with llm-enhanced recursive reasoners leveraging multi-agent hypergames. arXiv preprint arXiv:2502.07443, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.429, + 0.882, + 0.461 + ], + "angle": 0, + "content": "Trieu H Trinh, Yuhuai Wu, Quoc V Le, He He, and Thang Luong. Solving olympiad geometry without human demonstrations. Nature, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.468, + 0.882, + 0.515 + ], + "angle": 0, + "content": "Prapti Trivedi, Aditya Gulati, Oliver Molenschot, Meghana Arakkal Rajeev, Rajkumar Ramamurthy, Keith Stevens, Tanveesh Singh Chaudhery, Jahnavi Jambholkar, James Zou, and Nazneen Rajani. Self-rationalization improves llm as a fine-grained judge. arXiv preprint arXiv:2410.05495, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.523, + 0.882, + 0.57 + ], + "angle": 0, + "content": "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process-and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.577, + 0.882, + 0.609 + ], + "angle": 0, + "content": "Karthik Valmeekam, Matthew Marquez, and Subbarao Kambhampati. Can large language models really improve by self-critiquing their own plans? arXiv preprint arXiv:2310.08118, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.616, + 0.882, + 0.663 + ], + "angle": 0, + "content": "Pat Verga, Sebastian Hofstatter, Sophia Althammer, Yixuan Su, Aleksandra Piktus, Arkady Arkhangorodsky, Minjie Xu, Naomi White, and Patrick Lewis. Replacing judges with juries: Evaluating llm generations with a panel of diverse models. arXiv preprint arXiv:2404.18796, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.671, + 0.882, + 0.717 + ], + "angle": 0, + "content": "Johannes Von Oswald, Eyvind Niklasson, Ettore Randazzo, Joao Sacramento, Alexander Mordvintsev, Andrey Zhmoginov, and Max Vlademyrov. Transformers learn in-context by gradient descent. In International Conference on Machine Learning, pp. 35151-35174. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.725, + 0.882, + 0.772 + ], + "angle": 0, + "content": "Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. arXiv preprint arXiv:2407.10817, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.779, + 0.882, + 0.826 + ], + "angle": 0, + "content": "Xingchen Wan, Ruoxi Sun, Hootan Nakhost, and Sercan O Arik. Teach better or show smarter? on instructions and exemplars in automatic prompt optimization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=IdtoJVWvNx." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.833, + 0.882, + 0.925 + ], + "angle": 0, + "content": "Yuxuan Wan, Wenxuan Wang, Yiliu Yang, Youliang Yuan, Jen-tse Huang, Pinjia He, Wenxiang Jiao, and Michael Lyu. LogicAsker: Evaluating and improving the logical reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 
2124-2155, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.128. URL https://aclanthology.org/2024.emnlp-main.128/." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.884, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "65" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.166 + ], + "angle": 0, + "content": "Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In _Forty-first International Conference on Machine Learning_, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024c. URL https://openreview.net/forum?id=C4OpREezgj." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.175, + 0.885, + 0.268 + ], + "angle": 0, + "content": "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.277, + 0.885, + 0.31 + ], + "angle": 0, + "content": "Han Wang, Archiki Prasad, Elias Stengel-Eskin, and Mohit Bansal. Soft self-consistency improves language model agents. arXiv preprint arXiv:2402.13212, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.319, + 0.885, + 0.368 + ], + "angle": 0, + "content": "Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Yixuan Li, and Neel Joshi. 
Is a picture worth a thousand words? delving into spatial reasoning for vision language models. In The Thirty-Eighth Annual Conference on Neural Information Processing Systems, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.377, + 0.885, + 0.409 + ], + "angle": 0, + "content": "Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities, 2024c. URL https://arxiv.org/abs/2406.04692." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.418, + 0.885, + 0.481 + ], + "angle": 0, + "content": "Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Jirong Wen. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6), March 2024d. ISSN 2095-2236. doi: 10.1007/s11704-024-40231-1. URL http://dx.doi.org/10.1007/s11704-024-40231-1." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.491, + 0.885, + 0.568 + ], + "angle": 0, + "content": "Liang Wang, Nan Yang, and Furu Wei. Learning to retrieve in-context examples for large language models. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1752-1767, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.105/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.578, + 0.885, + 0.656 + ], + "angle": 0, + "content": "Peifeng Wang, Zhengyang Wang, Zheng Li, Yifan Gao, Bing Yin, and Xiang Ren. SCOTT: Self-consistent chain-of-thought distillation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5546-5558, Toronto, Canada, July 2023b. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.acl-long.304. URL https://aclanthology.org/2023.acl-long.304/." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.665, + 0.885, + 0.698 + ], + "angle": 0, + "content": "Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024f." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.707, + 0.885, + 0.784 + ], + "angle": 0, + "content": "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9426-9439. Association for Computational Linguistics, 2024g. URL https://doi.org/10.18653/v1/2024.acl-long.510." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.794, + 0.885, + 0.826 + ], + "angle": 0, + "content": "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of llm reasoning: Are multi-agent discussions the key?, 2024h. URL https://arxiv.org/abs/2402.18272." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.836, + 0.885, + 0.869 + ], + "angle": 0, + "content": "Song Wang, Zihan Chen, Chengshuai Shi, Cong Shen, and Jundong Li. Mixture of demonstrations for in-context learning. Advances in Neural Information Processing Systems, 37:88091-88116, 2024i." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.878, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023c." 
+ }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "66" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024j." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.161, + 0.885, + 0.207 + ], + "angle": 0, + "content": "Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. CoRR, abs/2310.05707, 2023d. doi: 10.48550/ARXIV.2310.05707. URL https://doi.org/10.48550/arXiv.2310.05707." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.218, + 0.885, + 0.279 + ], + "angle": 0, + "content": "Xinyi Wang, Wanrong Zhu, Michael Saxon, Mark Steyvers, and William Yang Wang. Large language models are latent variable models: Explaining and finding good demonstrations for in-context learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023e. URL https://openreview.net/forum?id=BGvkwZEGt7." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.291, + 0.885, + 0.322 + ], + "angle": 0, + "content": "Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. arXiv preprint arXiv:2402.10200, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.333, + 0.885, + 0.393 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023f. 
URL https://openreview.net/forum?id=1PL1NIMMrw." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.405, + 0.885, + 0.452 + ], + "angle": 0, + "content": "Yidong Wang, Zhuohao Yu, Wenjin Yao, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, et al. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. In The Twelfth International Conference on Learning Representations, 2023g." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.463, + 0.885, + 0.494 + ], + "angle": 0, + "content": "Yuqing Wang and Yun Zhao. Rupbench: Benchmarking reasoning under perturbations for robustness evaluation in large language models. arXiv preprint arXiv:2406.11020, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.505, + 0.885, + 0.581 + ], + "angle": 0, + "content": "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. In Findings of the Association for Computational Linguistics: EMNLP 2024, Miami, Florida, USA, November 12-16, 2024, pp. 7309-7319. Association for Computational Linguistics, 2024k. URL https://aclanthology.org/2024-findings-emnlp.429." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.592, + 0.885, + 0.639 + ], + "angle": 0, + "content": "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024l." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.65, + 0.885, + 0.694 + ], + "angle": 0, + "content": "Zihao Wang, Anji Liu, Haowei Lin, Jiaqi Li, Xiaojian Ma, and Yitao Liang. Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313, 2024m." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.707, + 0.885, + 0.768 + ], + "angle": 0, + "content": "Zilong Wang, Hao Zhang, Chun-Liang Li, Julian Martin Eisenschlos, Vincent Perot, Zifeng Wang, Lesly Miculicich, Yasuhisa Fujii, Jingbo Shang, Chen-Yu Lee, and Tomas Pfister. Chain-of-table: Evolving tables in the reasoning chain for table understanding. In The Twelfth International Conference on Learning Representations, 2024n. URL https://openreview.net/forum?id=4L0xnS4GQM." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.779, + 0.885, + 0.811 + ], + "angle": 0, + "content": "Peter Cathcart Wason and Philip Nicholas JohnsonLaird. Psychology of reasoning: Structure and content. Harvard University Press, 86, 1972." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.822, + 0.885, + 0.868 + ], + "angle": 0, + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682, 2022a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022b." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "67" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.165 + ], + "angle": 0, + "content": "Yuxiang Wei, Zhe Wang, Jiawei Liu, Yifeng Ding, and Lingming Zhang. Magicoder: Empowering code generation with OSS-instruct. 
In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 52632-52657. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wei24h.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.177, + 0.885, + 0.223 + ], + "angle": 0, + "content": "Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution, 2025. URL https://arxiv.org/abs/2502.18449." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.235, + 0.885, + 0.268 + ], + "angle": 0, + "content": "Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. arXiv preprint arXiv:2405.16337, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.279, + 0.885, + 0.325 + ], + "angle": 0, + "content": "Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. arXiv preprint arXiv:2406.16838, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.337, + 0.885, + 0.369 + ], + "angle": 0, + "content": "Ying Wen, Yaodong Yang, Rui Luo, Jun Wang, and Wei Pan. Probabilistic recursive reasoning for multi-agent reinforcement learning. arXiv preprint arXiv:1901.09207, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.381, + 0.885, + 0.411 + ], + "angle": 0, + "content": "Lily Weng. Llm-powered autonomous agents. *Github*, 2023. URL https://lilianweng.github.io/posts/2023-06-23-agent/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.424, + 0.885, + 0.455 + ], + "angle": 0, + "content": "Martin Weyssow, Aton Kamanda, and Houari A. Sahraoui. 
Codeultrafeedback: An llm-as-a-judge dataset for aligning large language models to coding preferences. CoRR, abs/2403.09032, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.468, + 0.885, + 0.499 + ], + "angle": 0, + "content": "Sarah Wegreffe, Ana Marasovic, and Noah A Smith. Measuring association between labels and free-text rationales. arXiv preprint arXiv:2010.12762, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.511, + 0.885, + 0.603 + ], + "angle": 0, + "content": "Sarah Wiegrefe, Jack Hessel, Swabha Swayamdipta, Mark Riedl, and Yejin Choi. Reframing human-AI collaboration for generating free-text explanations. In Marine Carpuat, Marie-Catherine de Marneffe, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 632-658, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.47. URL https://aclanthology.org/2022.naacl-main.47/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.615, + 0.885, + 0.646 + ], + "angle": 0, + "content": "Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8:229-256, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.658, + 0.885, + 0.703 + ], + "angle": 0, + "content": "Yuhuai Wu, Albert Jiang, Wenda Li, Markus Rabe, Charles Staats, Mateja Jamnik, and Christian Szegedy. Autoformalization with large language models. In Neural Information Processing Systems (NeurIPS), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.716, + 0.885, + 0.823 + ], + "angle": 0, + "content": "Zhaofeng Wu, Linlu Qiu, Alexis Ross, Ekin Akyurek, Boyuan Chen, Bailin Wang, Najoung Kim, Jacob Andreas, and Yoon Kim. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1819-1862, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.102. URL https://aclanthology.org/2024.naacl-long.102/." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.836, + 0.885, + 0.868 + ], + "angle": 0, + "content": "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.879, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Zijian Wu, Suozhi Huang, Zhejian Zhou, Huaiyuan Ying, Jiayu Wang, Dahua Lin, and Kai Chen. Internl m2. 5-stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024c." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "68" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.181 + ], + "angle": 0, + "content": "Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, Rui Zheng, Xiaoran Fan, Xiao Wang, Limao Xiong, Yuhao Zhou, Weiran Wang, Changhao Jiang, Yicheng Zou, Xiangyang Liu, Zhangyue Yin, Shihan Dou, Rongxiang Weng, Wensen Cheng, Qi Zhang, Wenjuan Qin, Yongyan Zheng, Xipeng Qiu, Xuanjing Huang, and Tao Gui. The rise and potential of large language model based agents: A survey. arXiv preprint arXiv:2309.07864, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.192, + 0.885, + 0.27 + ], + "angle": 0, + "content": "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, Xiao Wang, Rui Zheng, Tao Ji, Xiaowei Shi, Yitao Zhai, Rongxiang Weng, Jingang Wang, Xunliang Cai, Tao Gui, Zuxuan Wu, Qi Zhang, Xipeng Qiu, Xuanjing Huang, and YuGang Jiang. Enhancing llm reasoning via critique models with test-time and training-time supervision, 2024. URL https://arxiv.org/abs/2411.16579." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.281, + 0.885, + 0.313 + ], + "angle": 0, + "content": "Sang Michael Xie, Aditi Raghunathan, Percy Liang, and Tengyu Ma. An explanation of in-context learning as implicit bayesian inference. In International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.324, + 0.885, + 0.355 + ], + "angle": 0, + "content": "Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.367, + 0.885, + 0.428 + ], + "angle": 0, + "content": "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. CoRR, abs/2405.14333, 2024a. doi: 10.48550/ARXIV.2405.14333. URL https://doi.org/10.48550/arXiv.2405.14333." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.44, + 0.885, + 0.501 + ], + "angle": 0, + "content": "Huajian Xin, ZZ Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024b. URL https://arxiv.org/abs/2408.08152." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.513, + 0.885, + 0.545 + ], + "angle": 0, + "content": "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.557, + 0.885, + 0.602 + ], + "angle": 0, + "content": "Austin Xu, Srijan Bansal, Yifei Ming, Semih Yavuz, and Shafiq Joty. Does context matter? contextual judgebench for evaluating llm-based judges in contextual settings. arXiv preprint arXiv:2503.15620, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.615, + 0.885, + 0.647 + ], + "angle": 0, + "content": "Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.658, + 0.885, + 0.72 + ], + "angle": 0, + "content": "Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, Qingwei Lin, and Daxin Jiang. Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.732, + 0.885, + 0.779 + ], + "angle": 0, + "content": "Fangzhi Xu, Qika Lin, Jiawei Han, Tianzhe Zhao, Jun Liu, and Erik Cambria. Are large language models really good logical reasoners? a comprehensive evaluation and beyond. IEEE Transactions on Knowledge and Data Engineering, 2025c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.79, + 0.885, + 0.837 + ], + "angle": 0, + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 
Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.849, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Hanwei Xu, Yujun Chen, Yulun Du, Nan Shao, Wang Yanggang, Haiyu Li, and Zhilin Yang. GPS: Genetic prompt search for efficient few-shot learning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 8162-8171, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.559. URL https://aclanthology.org/2022.emnlp-main.559/." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "69" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025e." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.157, + 0.885, + 0.205 + ], + "angle": 0, + "content": "Kehan Xu, Kun Zhang, Jingyuan Li, Wei Huang, and Yuanzhuo Wang. Crp-rag: A retrieval-augmented generation framework for supporting complex logical reasoning and knowledge planning. _Electronics_, 14 (1):47, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.21, + 0.885, + 0.257 + ], + "angle": 0, + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models. arXiv preprint arXiv:2402.13116, 2024c." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.265, + 0.882, + 0.297 + ], + "angle": 0, + "content": "Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. SoftCoT: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025f." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.303, + 0.882, + 0.351 + ], + "angle": 0, + "content": "Zhiwei Xu, Yunpeng Bai, Bin Zhang, Dapeng Li, and Guoliang Fan. Haven: Hierarchical cooperative multiagent reinforcement learning with dual coordination mechanism. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11735-11743, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.357, + 0.882, + 0.403 + ], + "angle": 0, + "content": "Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S3c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.411, + 0.882, + 0.443 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.45, + 0.882, + 0.496 + ], + "angle": 0, + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.503, + 0.882, + 0.55 + ], + "angle": 0, + "content": "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024c. 
URL https://openreview.net/forum?id=Bb4VGOWELI." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.557, + 0.882, + 0.589 + ], + "angle": 0, + "content": "Jinghan Yang, Shuming Ma, and Furu Wei. Auto-icl: In-context learning without human supervision. arXiv preprint arXiv:2311.09263, 2023a. URL https://arxiv.org/abs/2311.09263." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.596, + 0.882, + 0.642 + ], + "angle": 0, + "content": "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan Prenger, and Anima Anandkumar. LeanDojo: Theorem proving with retrieval-augmented language models. In Neural Information Processing Systems (NeurIPS), 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.649, + 0.882, + 0.682 + ], + "angle": 0, + "content": "Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.688, + 0.882, + 0.734 + ], + "angle": 0, + "content": "Ruihan Yang, Jiangjie Chen, Yikai Zhang, Siyu Yuan, Aili Chen, Kyle Richardson, Yanghua Xiao, and Deqing Yang. Selfgoal: Your language agents already know how to achieve high-level goals. arXiv preprint arXiv:2406.04784, 2024e." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.741, + 0.882, + 0.819 + ], + "angle": 0, + "content": "Zonglin Yang, Li Dong, Xinya Du, Hao Cheng, Erik Cambria, Xiaodong Liu, Jianfeng Gao, and Furu Wei. Language models as inductive reasoners. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 209-225, St. Julian's, Malta, March 2024f. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.13/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.826, + 0.882, + 0.858 + ], + "angle": 0, + "content": "Shunyu Yao and Karthik Narasimhan. Language agents in the digital world: Opportunities and risks. _princeton-nlp.github.io_, Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.864, + 0.882, + 0.924 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023a. URL https://openreview.net/forum?id=5Xc1ecx01h." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "70" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. Re-Act: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.16, + 0.885, + 0.209 + ], + "angle": 0, + "content": "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, et al. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv preprint arXiv:2308.02151, 2023c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.218, + 0.885, + 0.267 + ], + "angle": 0, + "content": "Michihiro Yasunaga, Xinyun Chen, Yujia Li, Panupong Pasupat, Jure Leskovec, Percy Liang, Ed H. Chi, and Denny Zhou. Large language models as analogical reasoners. 
In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AgDICX1h50." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.275, + 0.885, + 0.324 + ], + "angle": 0, + "content": "He Ye, Matias Martinez, Xiapu Luo, Tao Zhang, and Martin Monperrus. Selfapr: Self-supervised program repair with test execution diagnostics. In Proceedings of the 37th IEEE/ACM International Conference on Automated Software Engineering, pp. 1-13, 2022. URL https://arxiv.org/abs/2203.12755." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.332, + 0.885, + 0.365 + ], + "angle": 0, + "content": "Jiacheng Ye, Zhiyong Wu, Jiangtao Feng, Tao Yu, and Lingpeng Kong. Compositional exemplars for in-context learning. In International Conference on Machine Learning, pp. 39818-39833. PMLR, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.374, + 0.885, + 0.408 + ], + "angle": 0, + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.417, + 0.885, + 0.495 + ], + "angle": 0, + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023b. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.504, + 0.885, + 0.582 + ], + "angle": 0, + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. 
In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023c. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.591, + 0.885, + 0.669 + ], + "angle": 0, + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023d. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.679, + 0.885, + 0.726 + ], + "angle": 0, + "content": "Ziyi Ye, Xiangsheng Li, Qiuchi Li, Qingyao Ai, Yujia Zhou, Wei Shen, Dong Yan, and Yiqun Liu. Beyond scalar reward model: Learning generative judge from preference data. arXiv preprint arXiv:2410.03742, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.736, + 0.885, + 0.77 + ], + "angle": 0, + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.779, + 0.885, + 0.828 + ], + "angle": 0, + "content": "Shuo Yin, Weihao You, Zhilong Ji, Guoqiang Zhong, and Jinfeng Bai. Mumath-code: Combining tool-use large language models with multi-perspective data augmentation for mathematical reasoning. arXiv preprint arXiv:2405.07551, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.836, + 0.885, + 0.87 + ], + "angle": 0, + "content": "Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. 
Natural language reasoning, a survey. ACM Computing Surveys, 56(12):1-39, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.878, + 0.885, + 0.927 + ], + "angle": 0, + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023a." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "71" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.151 + ], + "angle": 0, + "content": "Longhui Yu, Weisen Jiang, Han Shi, YU Jincheng, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. MetaMath: Bootstrap your own mathematical questions for large language models. In International Conference on Learning Representations (ICLR), 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.159, + 0.885, + 0.221 + ], + "angle": 0, + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024c." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.229, + 0.885, + 0.321 + ], + "angle": 0, + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025. URL https://arxiv.org/abs/2503.14476." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.33, + 0.885, + 0.362 + ], + "angle": 0, + "content": "Zhouliang Yu, Jie Fu, Yao Mu, Chenguang Wang, Lin Shao, and Yaodong Yang. Multireact: Multimodal tools augmented reasoning-acting traces for embodied agent planning. 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.37, + 0.885, + 0.416 + ], + "angle": 0, + "content": "Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Wei Ye, Jindong Wang, Xing Xie, Yue Zhang, and Shikun Zhang. Kieval: A knowledge-grounded interactive evaluation framework for large language models. arXiv preprint arXiv:2402.15043, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.426, + 0.885, + 0.487 + ], + "angle": 0, + "content": "Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Jia Deng, Boji Shan, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. CoRR, abs/2404.02078, 2024a. doi: 10.48550/ARXIV.2404.02078. URL https://doi.org/10.48550/arXiv.2404.02078." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.496, + 0.885, + 0.541 + ], + "angle": 0, + "content": "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. 
Free process rewards without process labels, 2024b. URL https://arxiv.org/abs/2412.01981." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.551, + 0.885, + 0.584 + ], + "angle": 0, + "content": "Siyu Yuan, Kaitao Song, Jiangjie Chen, Xu Tan, Dongsheng Li, and Deqing Yang. Evoagent: Towards automatic multi-agent generation via evolutionary algorithms. arXiv preprint arXiv:2406.14228, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.592, + 0.885, + 0.624 + ], + "angle": 0, + "content": "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. arXiv preprint arXiv:2401.10020, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.632, + 0.885, + 0.679 + ], + "angle": 0, + "content": "Zheng Yuan, Hongyi Yuan, Chengpeng Li, Guanting Dong, Chuanqi Tan, and Chang Zhou. Scaling relationship on learning mathematical reasoning with large language models. CoRR, abs/2308.01825, 2023. doi: 10.48550/ARXIV.2308.01825. URL https://doi.org/10.48550/arXiv.2308.01825." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.687, + 0.885, + 0.735 + ], + "angle": 0, + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: Learning to reason dynamically in LLMs via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tn2mjzjSyR." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.743, + 0.885, + 0.775 + ], + "angle": 0, + "content": "Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.783, + 0.885, + 0.831 + ], + "angle": 0, + "content": "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. STar: Bootstrapping reasoning with reasoning. 
In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=3ELRdg2sgI." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.839, + 0.885, + 0.885 + ], + "angle": 0, + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.894, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024a." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "72" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.165 + ], + "angle": 0, + "content": "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. CoRR, abs/2410.02884, 2024b. URL https://doi.org/10.48550/arXiv.2410.02884." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.174, + 0.885, + 0.222 + ], + "angle": 0, + "content": "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, Bingnan Zheng, Bang Liu, Yuyu Luo, and Chenglin Wu. Aflow: Automating agentic workflow generation, 2024c. URL https://arxiv.org/abs/2410.10762." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.228, + 0.885, + 0.261 + ], + "angle": 0, + "content": "Jun Zhang, Trey Hedden, and Adrian Chia. Perspective-taking and depth of theory-of-mind reasoning in sequential-move games. Cognitive science, 36(3):560-573, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.269, + 0.885, + 0.302 + ], + "angle": 0, + "content": "Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.309, + 0.885, + 0.358 + ], + "angle": 0, + "content": "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024e. URL https://openreview.net/forum?id=CxHRoTLmPX." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.365, + 0.885, + 0.398 + ], + "angle": 0, + "content": "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024f." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.405, + 0.885, + 0.438 + ], + "angle": 0, + "content": "Qizhen Zhang, Chris Lu, Animesh Garg, and Jakob Foerster. Centralized model and exploration policy for multi-agent rl. arXiv preprint arXiv:2107.06434, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.445, + 0.885, + 0.493 + ], + "angle": 0, + "content": "Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. Finagent: A multimodal foundation agent for financial trading: Tool-augmented, diversified, and generalist. arXiv preprint arXiv:2402.18485, 2024g." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.501, + 0.885, + 0.548 + ], + "angle": 0, + "content": "Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. CoRR, abs/2406.09136, 2024h. doi: 10.48550/ARXIV.2406.09136. URL https://doi.org/10.48550/arXiv.2406.09136." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.556, + 0.885, + 0.589 + ], + "angle": 0, + "content": "Xuanliang Zhang, Dingzirui Wang, Longxu Dou, Qingfu Zhu, and Wanxiang Che. A survey of table reasoning with large language models. Frontiers of Computer Science, 19(9):199348, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.597, + 0.885, + 0.642 + ], + "angle": 0, + "content": "Yufeng Zhang, Fengzhuo Zhang, Zhuoran Yang, and Zhaoran Wang. What and how does in-context learning learn? bayesian model averaging, parameterization, and generalization. arXiv preprint arXiv:2305.19420, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.652, + 0.885, + 0.73 + ], + "angle": 0, + "content": "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 15637-15653, Bangkok, Thailand, August 2024i. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.924. URL https://aclanthology.org/2024-findings-acl.924/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.737, + 0.885, + 0.831 + ], + "angle": 0, + "content": "Zhehao Zhang, Yan Gao, and Jian-Guang Lou. \\(e^5\\): Zero-shot hierarchical table analysis using augmented LLMs via explain, extract, execute, exhibit and extrapolate. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1244-1258, Mexico City, Mexico, June 2024j. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.68. URL https://aclanthology.org/2024.naacl-long.68/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.838, + 0.885, + 0.886 + ], + "angle": 0, + "content": "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.893, + 0.885, + 0.926 + ], + "angle": 0, + "content": "Ruochen Zhao, Xingxuan Li, Shafiq Joty, Chengwei Qin, and Lidong Bing. Verify-and-edit: A knowledge-enhanced chain-of-thought framework. arXiv preprint arXiv:2305.03268, 2023." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "73" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.15 + ], + "angle": 0, + "content": "Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. CoRR, abs/2412.06559, 2024. URL https://arxiv.org/abs/2412.06559." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.161, + 0.885, + 0.27 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llmas-a-judge with mt-bench and chatbot arena. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023a. URL http://papers.nips.cc/paper_files/paper/2023/bit/91f18a1287b398d378ef22505bf41832-AAbstract-Datasets_and_Benchmarks.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.28, + 0.885, + 0.325 + ], + "angle": 0, + "content": "Rui Zheng, Shihan Dou, Songyang Gao, Yuan Hua, Wei Shen, Binghai Wang, Yan Liu, Senjie Jin, Qin Liu, Yuhao Zhou, et al. Secrets of rlhf in large language models part i: Ppo. arXiv preprint arXiv:2307.04964, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.338, + 0.885, + 0.401 + ], + "angle": 0, + "content": "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 62138-62160. PMLR, 21-27 Jul 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.411, + 0.885, + 0.474 + ], + "angle": 0, + "content": "Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgfM." + }, + { + "type": "ref_text", + "bbox": [ + 0.113, + 0.485, + 0.885, + 0.532 + ], + "angle": 0, + "content": "Han Zhou, Xingchen Wan, Ruoxi Sun, Hamid Palangi, Shariq Iqbal, Ivan Vulic, Anna Korhonen, and Sercan Ö. Ark. Multi-agent design: Optimizing agents with better prompts and topologies, 2025a. URL https://arxiv.org/abs/2502.02533." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.543, + 0.885, + 0.605 + ], + "angle": 0, + "content": "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=BR0vXhmzYK." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.617, + 0.885, + 0.662 + ], + "angle": 0, + "content": "Yilun Zhou, Austin Xu, Peifeng Wang, Caiming Xiong, and Shafiq Joty. Evaluating judges as evaluators: The jetst's benchmark of llm-as-judges as test-time scaling evaluators. arXiv preprint arXiv:2504.15253, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.675, + 0.885, + 0.723 + ], + "angle": 0, + "content": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large language models are human-level prompt engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.733, + 0.885, + 0.781 + ], + "angle": 0, + "content": "Yuxiang Zhou, Jiazheng Li, Yanzheng Xiang, Hanqi Yan, Lin Gui, and Yulan He. The mystery of in-context learning: A comprehensive survey on interpretation and analysis. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 14365-14378, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.792, + 0.885, + 0.84 + ], + "angle": 0, + "content": "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024a." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.85, + 0.885, + 0.883 + ], + "angle": 0, + "content": "Ying Zhu, Shengchang Li, Ziqian Kong, and Peilan Xu. Graph retrieval augmented trustworthiness reasoning. arXiv preprint arXiv:2408.12333, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.893, + 0.885, + 0.925 + ], + "angle": 0, + "content": "Mingchen Zhuge, Wenyi Wang, Louis Kirsch, Francesco Faccio, Dmitrii Khizbullin, and Jürgen Schmidhuber. Language agents as optimizable graphs, 2024. URL https://arxiv.org/abs/2402.16823." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.103, + 0.885, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "74" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.181 + ], + "angle": 0, + "content": "Jingming Zhuo, Songyang Zhang, Xinyu Fang, Haodong Duan, Dahua Lin, and Kai Chen. ProSA: Assessing and understanding the prompt sensitivity of LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 1950-1976, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.108. URL https://aclanthology.org/2024.findings-emnlp.108/." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.189, + 0.885, + 0.235 + ], + "angle": 0, + "content": "Daniel M. Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B. Brown, Alec Radford, Dario Amodei, Paul F. Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. CoRR, abs/1909.08593, 2019. URL http://arxiv.org/abs/1909.08593." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.244, + 0.885, + 0.276 + ], + "angle": 0, + "content": "Kaijian Zou, Muhammad Khalifa, and Lu Wang. Retrieval or global context understanding? on many-shot in-context learning for long-context evaluation. 
arXiv preprint arXiv:2411.07130, 2024." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.103, + 0.885, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "75" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_origin.pdf b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..00148882ad7313dbb39d3b4731c9637263db45ad --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/7cd60625-0d0d-4f3d-b38f-7ab687b11e36_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949c3fbc3033b3d4341b05cdc30566f048cb552efbf99469a5623b107fd9ecaf +size 2178874 diff --git a/data/2025/2504_09xxx/2504.09037/full.md b/data/2025/2504_09xxx/2504.09037/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1692bc8a85aa55a70ad5edefca91a217180b4b12 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/full.md @@ -0,0 +1,1226 @@ +# A Survey of Frontiers in LLM Reasoning: Inference Scaling, Learning to Reason, and Agentic Systems + +Zixuan Ke\* + +Fangkai Jiao + +Yifei Ming* + +Xuan-Phi Nguyen* + +Austin Xu\* + +Do Xuan Long†,‡ + +Minzhi Li† ‡ + +Chengwei Qin + +Peifeng Wang* + +Silvio Savarese* + +Caiming Xiong* + +Shafiq Joty\*, + +\*Salesforce AI Research +Nanyang Technological University + +† National University of Singapore +$^\ddagger I^2 R$ , $A^{*}STAR$ , Singapore + +zixuan ke@salesforce.com + +jiaofangkai@hotmail.com + +yifei.ming@salesforce.com + +xnguyen@salesforce.com + +austin.xu@salesforce.com + +xuanlong.do@u.nus.edu + +li.minzhi@u.nus.edu + +chengwei003@e.ntu.edu.sg + +peifeng.wang@salesforce.com + +ssavarese@salesforce.com + +cxiong@salesforce.com + +sjoty@salesforce.com + +# Abstract + +Reasoning is a fundamental cognitive process that enables logical inference, 
problem-solving, and decision-making. With the rapid advancement of large language models (LLMs), reasoning has emerged as a key capability that distinguishes advanced AI systems from conventional models that empower chatbots. In this survey, we categorize existing methods along two orthogonal dimensions: (1) Regimes, which define the stage at which reasoning is achieved (either at inference time or through dedicated training); and (2) Architectures, which determine the components involved in the reasoning process, distinguishing between standalone LLMs and agentic compound systems that incorporate external tools, and multiagent collaborations. Within each dimension, we analyze two key perspectives: (1) Input level, which focuses on techniques that construct high-quality prompts that the LLM conditions on; and (2) Output level, which covers methods that refine multiple sampled candidates to enhance reasoning quality. This categorization provides a systematic understanding of the evolving landscape of LLM reasoning, highlighting emerging trends such as the shift from inference-scaling to learning-to-reason (e.g., DeepSeek-R1), and the transition to agentic workflows (e.g., OpenAI Deep Research, Manus Agent). Additionally, we cover a broad spectrum of learning algorithms, from supervised fine-tuning to reinforcement learning such as PPO and GRPO, and the training of reasoners and verifiers. We also examine key designs of agentic workflows, from established patterns like generator-evaluator and LLM debate to recent innovations. Finally, we identify emerging trends, such as domain-specific reasoning systems, and open challenges, such as evaluation and data quality. This survey aims to provide AI researchers and practitioners with a comprehensive foundation for advancing reasoning in LLMs, paving the way for more sophisticated and reliable AI systems. + +![](images/a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg) +Figure 1: The LLM reasoning surge. 
We show the cumulative number (in thousands) of papers published from 2022 to 2/2025, based on Semantic Scholar keyword search. Research on reasoning regimes and agent architectures has accelerated notably since the introduction of Chain-of-Thought (CoT) in 2022. This growth is further influenced by other major developments, such as the release of ChatGPT (Ouyang et al., 2022) in 9/2022, and the popularity of in-context learning (Brown et al., 2020) as an inference-time optimization method. + +![](images/dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg) + +# 1 Introduction + +Reasoning is the cognitive process of analyzing evidence, constructing arguments, and applying logic to form conclusions or make informed judgments. It is essential to many intellectual pursuits, including decision-making, problem-solving, and critical thinking. The study of reasoning spans multiple disciplines—philosophy (Passmore, 1961), psychology (Wason & Johnson-Laird, 1972), and computer science (Huth & Ryan, 2004)—as it provides insights into how individuals interpret information, evaluate alternatives, and develop sound conclusions using logic. + +Recently, large language models (LLMs) have demonstrated a range of emerging abilities, such as in-context learning (Dong et al., 2024), role playing (Shanahan et al., 2023b) and domain adaptation (Ke et al., 2023; 2025a; Ke & Liu, 2023) as they scale, with reasoning becoming one of the most critical capabilities. As shown in Figure 1, this area has rapidly gained research attention, often referred to as LLM reasoning or reasoning language model (RLM) (Besta et al., 2025). 
The increasing focus on this topic is understandable, as reasoning capability is: (i) Challenging, requiring multi-step processing beyond the token-by-token generative nature of auto-regressive LLMs; (ii) Fundamental, as it is a core aspect of intelligence, particularly in planning and strategic decision-making; and, most importantly, (iii) Promising, as recent advances in LLMs hint at a viable path forward. Given these factors, reasoning is widely regarded as a prerequisite for more advanced AI systems approaching Artificial General Intelligence (AGI), beyond the conventional AI that aims to closely follow instruction (Duenas & Ruiz, 2024). + +Reasoning requires LLMs to go beyond directly producing an answer from a question; instead, they must generate the thinking process (implicitly or explicitly) in the form of 'question $\rightarrow$ reasoning steps $\rightarrow$ answer'. It has been shown that scaling pre-training may not be the optimal solution for improving reasoning (Snell et al., 2025; OpenAI, 2025). Instead, one popular approach to achieve this is the well-known chain-of-thought (CoT) prompting (Wei et al., 2022b), which demonstrates that by modifying the prompt (e.g., 'Let us think step by step') or in-context samples, LLMs can elicit a step-by-step reasoning process at test time without additional training. Such intuitive prompting techniques have been shown to substantially improve LLMs' reasoning accuracy (Wei et al., 2022b). Building on this, the ability of LLMs to reason effectively depends on two factors: how and at what stage reasoning is achieved, and what components are involved in the reasoning process. Accordingly, in this survey, we categorize existing research into two orthogonal dimensions: (1) Regime, refers to whether reasoning is achieved through inference-time strategies (aka. 
inference-time + +scaling) or through direct learning and adaptation (learning to reason); and (2) Architecture, refers to whether reasoning happens within a single, standalone LLM or within an interactive, agentic system. + +These two dimensions are orthogonal, meaning different regimes can be applied to the same architecture, and different architectures can operate under the same regime. The intersection of these dimensions allows for a more comprehensive and systematic organization of reasoning techniques, encompassing most approaches studied to date while highlighting key trends, such as the shift from inference scaling to learning-to-reason and from standalone LLMs to agentic systems. Notably, most prior surveys have focused on only one or two of these dimensions, typically inference scaling and standalone LLMs, rarely considering both together (see detailed comparison later). By introducing this categorization, we aim to provide a structured perspective that clarifies the diverse landscape of LLM reasoning and establishes a foundation for future research. + +# 1.1 Reasoning Regimes + +Inference scaling CoT prompting demonstrates the potential to scale inference-time (test-time) reasoning. It has also been shown that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it improves generalization through enhanced flexibility in prompt and workflow design. Building on this, inference scaling techniques have emerged, allowing additional test-time computation before generating an answer. The key idea is that instead of updating the LLM itself, these methods aim to select the best trajectories to improve reasoning. + +Several variants of prompting methods (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022) have been introduced, providing structured prompts to enhance reasoning. 
Additionally, inference scaling optimizes reasoning through search and planning (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023; Suzgun & Kalai, 2024a). One key challenge in search and planning is evaluating the quality of candidate solutions. However, evaluating reasoning quality is inherently difficult, even for humans. Existing approaches can be categorized based on whether they judge the final outcome, i.e., outcome reward models (ORMs) (Hendrycks et al., 2021b), or the reasoning process, i.e., process reward models (PRMs) (Lightman et al., 2024). + +One of the most notable milestones in this direction is OpenAI's o1 (09/2024) (OpenAI et al., 2024), which demonstrate the effectiveness of inference-time scaling in complex tasks like mathematics, coding and scientific problem-solving: + +"We have found that the performance of o1 consistently improves with more reinforcement learning (train-time compute) and with more time spent thinking (test-time compute). The constraints on scaling this approach differ substantially from those of LLM pretraining, and we are continuing to investigate them." — OpenAI o1 release blog + +Learning-to-reason Another approach to unleash the deliberate thinking is updating the LLM through training. Unlike inference scaling, learning-to-reason aims to enhance reasoning capabilities through dedicated training, reducing reliance on costly inference-time computations. However, a key challenge in this regime is the scarcity of training data, as step-by-step human-annotated reasoning trajectories are prohibitively expensive to collect. To address this, research has focused on automatically generating such trajectories and developing effective training strategies to leverage them. For example, supervised fine-tuning with long CoT (Muennighoff et al., 2025) or preference learning with reasoning preference data, with DPO (Rafailov et al., 2023) as a representative approach. 
More recent approaches even bypass reasoning annotation by using reinforcement learning (RL), with recent work like GRPO (Shao et al., 2024) demonstrating remarkable success in this direction. A significant milestone in this direction is DeepSeek-R1 (01/2025) (DeepSeek-AI et al., 2025), an open-source model that achieves performance comparable to OpenAI's o1 while requiring far fewer computational resources. It further reveals that RL alone is possible to learn the sophisticated behaviors just as the test-time computation increase: + +"One of the most remarkable aspects of this self-evolution is the emergence of sophisticated behaviors as the test-time computation increases. Behaviors such as reflection—where the model revisits and reevaluates its previous steps—and the exploration of alternative ap- + +![](images/60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg) +Figure 2: The proposed categorization over regimes, architectures, and unified perspectives in this survey. + +proaches to problem-solving arise spontaneously. These behaviors are not explicitly programmed but instead emerge as a result of the model's interaction with the reinforcement learning environment." — DeepSeek-R1 'Aha moment' + +# 1.2 Reasoning System Architecture + +Standalone LLM and agentic systems Orthogonal to the regimes, studies have explored architectural advancements in LLM reasoning, moving beyond next-token prediction in standalone models to embrace agentic systems—AI systems that exhibit interactivity and autonomy to refine reasoning and decision-making. These systems go beyond the challenges of inference scaling or learning to reason; they introduce system-level complexities, such as designing workflows and coordinating potentially conflicting actions. + +Single-Agent and multi-agent systems To distinguish agentic systems from standalone LLMs, we adopt the perspective of Kapoor et al. (2024), framing agentic behavior as a spectrum. 
We categorize these systems into two families: single-agent and multi-agent. In single-agent systems, a single LLM interacts with tools in its environment to refine reasoning, actions, and perceptions. These tools include external knowledge bases (Ke et al., 2024; Hammane et al., 2024; Sun et al., 2023), verifiers (Wan et al., 2024c; Guan et al., 2025), and practical applications like code interpreters, calendars, and maps (Yu et al., 2023b; Lu et al., 2024a). By leveraging these resources, the LLM iteratively enhances its decision-making and problem-solving capabilities. Recent milestones in single-agent systems, such as Grok 3 Deep Search (02/2025) and OpenAI Deep Research (02/2025), demonstrate how agents interact with the web to significantly improve reasoning, perform tasks like information retrieval, use code interpreters for calculations, and aggregate data from multiple sources. + +"Deep research independently discovers, reasons about, and consolidates insights from across the web. To accomplish this, it was trained on real-world tasks requiring browser and Python tool use ... While o1 demonstrates impressive capabilities in coding, math, and other technical domains, many real-world challenges demand extensive context and information gathering from diverse online sources." — OpenAI deep research release blog + +The second family, multi-agent systems, goes beyond agent-environment interactions by enabling agent-agent communication. Each agent takes on a distinct role and exchanges messages with others. Key challenges include designing effective communication protocols—whether collaborative (Chen et al., 2023c) or adversarial (Liang et al., 2023b)—and coordinating actions to reach consensus on the final action for the environment. A recent example of this potential is Manus, a popular product showcasing the power of multi-agent systems. 
+ +# 1.3 Unified Perspectives + +Although inference scaling and learning-to-reason take different approaches to improving reasoning, they are inherently connected. Inference scaling focuses on selecting the best reasoning trajectories, while learning-to-reason leverages both good and bad trajectories as training data. To unify these approaches, we categorize reasoning trajectory collection techniques in both regimes based on two key perspectives: input and output. At the input level, techniques modify or augment prompts to guide the LLM toward desirable reasoning paths. At the output level, the LLM generates multiple candidate responses, which are then evaluated, ranked, or refined. This framework highlights that many inference scaling techniques—such as prompt modification or trajectory search—can be repurposed for trajectory collection in learning-to-reason (as described in Section 3 and Section 5). Moreover, this connection shows that the two approaches are complementary: inference scaling methods can be applied to models trained under learning-to-reason, motivating the development of inference-aware learning-to-reason methods (Section 5.4). + +These aspects are also effective across different architectures. Similar to standalone LLMs, we categorize techniques based on input and output perspectives. However, to align with agentic system conventions, we use perception as input (to an agent) and action as output (of an agent) in single-agent systems. For multi-agent systems, we consider communication as input (to a participating agent) and coordination as output (of the system). This analogy provides a unified perspective across regimes and architectures, offering a systematic and generalizable framework for analyzing LLM reasoning (see Figure 2). 
+ +# 1.4 Goal and Structure of the Survey + +The goal of this survey is to provide a comprehensive overview of key algorithmic details and major milestones in LLM reasoning research, particularly since the emergence of Chain-of-Thought (CoT), across both regime and architecture dimensions. We believe this is a timely and valuable contribution to the community, given the clear acceleration in research following CoT's introduction in 2022 (Figure 1). The rapid growth in studies exploring all aspects of LLM reasoning—from regimes and architectures to training algorithms—highlights the increasing importance and utility of reasoning capabilities in advancing the field. + +Figure 2 provides an overview of the categorization in this survey, organized along two orthogonal dimensions. Within each architecture, there are two key perspectives to consider. The first perspective is input, or perception, or communication. This concerns how to construct a better prompt, refine the given observations from the environment, or establish protocols for exchanging messages with other agents. The second is output—encompassing action or coordination—which involves aggregating outputs, enhancing actions, or coordinating actions to produce a final result. While the figure illustrates high-level categorizations, the following sections delve into more specific terms. For example, 'input' is discussed in terms of constructing prompts (see e.g., Sections 3.1.1 and 5.1.1), while 'output' relates to optimizing output and collecting high-quality trajectories (e.g., Sections 3.1.2 and 5.1.2). + +Figure 3 outlines the structure of this survey. We start with a brief introduction to the background, covering key terminologies, components, regimes, and architectures (Section 2). The subsequent sections explore inference scaling (Section 3), learning algorithms for reasoners and verifiers (Section 4), and learning to reason (Section 5). 
Within the discussions on inference scaling and learning to reason, we examine three key architectures: Standalone LLMs, Single-Agent systems, and Multi-Agent systems. Finally, Section 6 summarizes key insights and discusses open challenges and future directions. + +# 1.5 Comparison to Related Surveys + +Reasoning in LLMs has long been a fundamental challenge in the field. Earlier works, such as Huang & Chang (2023), provide a comprehensive overview of the evolution of informal deductive reasoning covering developments prior to the emergence of LLM agents and Reasoning Language Models (RLMs). Our work extends this discussion by focusing on LLM agents and RLMs. Qiao et al. (2023b) offer a detailed summary of advancements in LLM reasoning, with a particular emphasis on prompting techniques. In contrast, we offer a broader range of regimes (from inference to training) and architectures (from standalone LLM to multi-agent systems). Readers interested in a formal definition and taxonomy of natural language reasoning—grounded + +![](images/67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg) +Figure 3: Taxonomy of LLM reasoning research organized in this survey by regimes (inference scaling, learning to reason) and architectures (standalone LLM, single-agent, multi-agent). Each leaf node includes examples from the literature that focus on the corresponding category. + +in philosophical foundations—may refer to Yu et al. (2024a), which focuses specifically on this direction and is complementary to our scope. + +Improvements in LLM reasoning are closely tied to advancements in a variety of techniques. Dong et al. (2024) present a comprehensive survey on in-context learning (ICL), while Zhou et al. (2024c) explore the interpretation and analysis of ICL from both theoretical and empirical perspectives. 
In contrast, our work organizes ICL techniques under different regimes—standalone LLMs, single-agent, and multi-agent + +systems—highlighting how these techniques evolve and interact within each setting. Recent studies suggest that enhancements in reasoning are often linked to inference scaling. Dong et al. (2024) provide an extensive review of inference-time self-improvement, and Welleck et al. (2024) offer a survey focused on three key themes: token-level generation algorithms, meta-generation algorithms, and efficient generation. Following the release of Reasoning Language Models (RLMs) such as OpenAI's o1 and DeepSeek's R1, there has been a significant increase in research dedicated to learning-to-reason approaches. Zeng et al. (2024) and Xu et al. (2025d) provide thorough surveys on these emerging developments. However, these surveys primarily focus on LLMs, and do not address agentic or multi-agent reasoning settings in depth. + +Research on LLM reasoning has predominantly centered on logical and mathematical reasoning. Liu et al. (2025a) offer a comprehensive survey of logical reasoning in LLMs, delving into its theoretical foundations and associated benchmarks. In their position paper, Yang et al. (2024d) underscore the pivotal role of formal mathematical reasoning, showcasing its superiority over traditional NLP-based methods in generating verifiable proofs and automated feedback. Their work outlines progress in theorem proving and auto-formalization while identifying key challenges that remain. While we cover domain-specific reasoning in Section 6.1.3, we refer readers to Liu et al. (2025a) and Yang et al. (2024d) for a more in-depth treatment of these topics. + +Reasoning is a critical capability in agentic systems (Pezeshkpour et al., 2024; Masterman et al., 2024). While numerous reviews focus on agent systems (Xi et al., 2023; Kapoor et al., 2024), discussions on reasoning within these systems remain limited. A concurrent work by Besta et al. 
(2025) introduces a comprehensive and modular framework for RLMs that systematically organizes key components such as reasoning structures, strategies, benchmarks and learning algorithms. However, their work does not delve into agentic and multiagent LLM systems.1 + +This survey provides a comprehensive overview of major milestones in LLM reasoning research, emphasizing two key dimensions: (1) the evolution of learning schemes—from inference scaling to learning-to-reason approaches—and (2) architectural advancements—from single LLMs to multi-agent systems. These dimensions summarize recent progress and lay the groundwork for future reasoning LLMs and agentic systems. We unify techniques under input and output perspectives, clarifying what must be customized or designed when building reasoning systems. Additionally, we detail essential techniques, including a comparison of the latest learning algorithms (e.g., RL) and an in-depth discussion of refiners and verifiers, which are critical for facilitating reasoning. Given these contributions, our survey is timely, offering AI researchers up-to-date insights into the field. We anticipate further research along these dimensions, such as agent-human regimes (Liang et al., 2024) and automated workflow design architectures (Hu et al., 2025; Zhang et al., 2024c; Zhou et al., 2025a). + +# 2 Background + +In this section, we introduce foundational concepts that will be utilized throughout the paper. + +# 2.1 Problem Formulation + +LLM reasoning is often formulated within the Markov Decision Process (MDP) framework (Bellman, 1958), treating reasoning as a sequential decision-making process. While many of the terminologies in LLM reasoning originate from the AI agent and reinforcement learning (RL) literature (Russell & Norvig, 2010), their meaning in LLM reasoning can sometimes differ to suit the nature of LLM-based reasoning. 
+ +Reasoning step and thought The definition of what makes a reasoning step can vary depending on the specific inference or learning algorithm used, and it often depends on the granularity at which rewards (or feedback) are considered. Generally, a reasoning step can be expressed as a sequence of tokens $a_{t} = (x_{t_{1}},\ldots ,x_{t_{K}})$ , where $x_{t_k}$ is the $k$ -th token at inference step $t$ . Typically, $a_{t}$ represents a coherent step in reasoning (Lightman et al., 2024), such as a logical deduction or an intermediate conclusion. However, in extreme cases, a reasoning step can be the entire response (Zhang et al., 2024b; DeepSeek-AI et al., 2025) + +
SymbolName/terminologyExplanation
atAction/responseThe reasoning step or action taken at time step t , where t ∈ {1,2,...,T}
stState/contextst := (q, a1, ..., at-1), where q is the prompt/question.
RReward model/verifierEvaluates the reasoning quality of action at state st, providing feedback.
rtRewardrt := R(st, at), reward given by verifier at time step t.
τTrajectoryτ := ((s0, a0, r0), ..., (sT, aT, rT)), The entire reasoning process leading to an answer.
πPolicy model/reasonerat ~ π(at|st): The reasoning strategy that maps a reasoning state to the next reasoning step.
VValue ModelEstimates the expected future reasoning quality from state st.
FRefinera′t = F(st, at, rt): Modifies or refines the action based on feedback from the verifier.
+ +Table 1: An overview of symbols and terminologies for convenience. + +or a single token (Schulman et al., 2017; Ouyang et al., 2022).2 The term Thought generally refers to the sequence of reasoning steps (i.e., reasoning trajectory) that occur from the question (excluding the question itself) to the final answer (excluding the final answer). + +Reasoning as MDP An MDP is a general framework for modeling environments where an agent makes sequential decisions by observing states and receiving rewards for its actions. The state-action-reward trajectories in an MDP can be formally expressed as: $\tau = ((s_0, a_0, r_0), \ldots, (s_T, a_T, r_T))$ , where $T$ is the trajectory length. Naturally, LLM reasoning can be framed as an MDP, as each reasoning step builds upon previous ones to arrive at a final answer $(s_T)$ from a question $(s_0)$ . However, a key distinction lies in how the state transition function $P(s_{t+1} | s_t, a_t)$ is defined. In traditional MDPs, state transitions are driven by the environment (unknown to the agent). In LLM reasoning, this depends on the system architecture: in standalone LLMs, the model itself generates the next state, whereas in agentic systems, state transitions can be influenced by external tools within the environment. + +In RL-based approaches, the goal is to maximize the reasoning quality measured by the cumulative reward: + +$$ +\max \mathbb {E} _ {\tau \sim P (\tau | s _ {0}, \pi)} \left[ \sum_ {t = 1} ^ {T} r _ {t} \right], \tag {1} +$$ + +where $\pi$ is the reasoning policy and $r_t = \mathcal{R}(s_t, a_t)$ is the reward given by the reward function $\mathcal{R}$ at time step $t$ . There are two primary approaches to optimize Equation 1. The first is via training, which involves optimizing model parameters to learn the optimal policy $\pi$ through methods like preference learning (e.g., DPO (Rafailov et al., 2023)) or reinforcement learning (e.g., PPO (Schulman et al., 2017)). 
The second is inference-scaling, which optimizes Equation 1 without altering model parameters. Instead, it employs a form of "search" with a frozen model, often guided by a reward model (Zhang et al., 2025b). We summarize key terminologies in Table 1. + +# 2.2 Key Components of LLM Reasoning Systems + +An LLM-based reasoning system may contain three key components depending on the reasoning regime and system architecture: (a) A Reasoner that generates the reasoning steps, serving as the policy model; (b) Verifiers that evaluate the correctness of the final outcome and/or reasoning steps, serving as reward functions; and (c) A Refiner that improves reasoning trajectories by refining responses based on the feedback from the verifier. Figure 4 shows a depiction of these components. While these components play complementary and important roles in a reasoning system, they can be implemented by the same LLM, e.g., self-refinement (Saunders et al., 2022; Madaan et al., 2024) unifies them. + +Reasoner The reasoner generates reasoning steps based on the current state of the reasoning process. It takes as input the previous states and outputs the next response or action. As the core component of a reasoning system, it determines how reasoning progresses and influences the final outcome. + +![](images/26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg) +Figure 4: Three key components of a reasoning system. The Reasoner proposes new responses (usually accompanied with rationales) for a query. The Verifier takes as input a verification instruction (e.g., what aspects to evaluate) and the response(s) from the reasoner, then outputs a judgment on the response(s) (often in the form of a numeric score or relative order, and typically accompanied by a natural language critique or rationale for its judgment). 
The Refiner, unlike the first two, takes as input an incorrect response and optionally the critique (as provided by the verifier) and outputs a revised response. + +**Verifier** The verifier assesses the quality of the final answer or intermediate reasoning steps and provides feedback to the reasoner. Verifiers can be outcome-level, where only the outcome is evaluated, or process-level, where intermediate reasoning steps are also evaluated. The type of feedback can range from a scalar reward (e.g., correct/wrong answer on a math problem or pass/fail for code test case) to natural language explanations. When ground-truth is available (e.g., during training), the verifier can be implemented using rule-based functions (e.g., string matching) or by training a reward model or using an LLM-judge model. + +Refiner Given a feedback from the verifier, as well as a response from the reasoner, a refiner tries to improve and polish the original reasoning trajectory containing flaws. Refiners can play two important roles in reasoning. First, it can serve as a general approach to improve the performance during inference. More importantly, by providing explicit analysis, a refiner can also conduct implicit search, i.e., pointing out the obstacles in current trajectory, and offer a new perspective to compress the search space. Yet, recent studies (Qu et al., 2024a) show that is not at least easier than learning reasoning. + +# 2.3 System Architectures + +Building on the three key components introduced above, in this section, we describe how these elements are organized within different system architectures to achieve effective reasoning. While the three components serve as the foundation, their integration and interaction vary across architectural paradigms. In this survey, we structure reasoning systems into three main types: standalone LLM, single-agent system, and multi-agent system. Figure 5 shows their comparison with visualizations. 
+ +# 2.3.1 Standalone LLM Systems + +A standalone LLM system comprises a single LLM which can play the role of one or more components (we refer this as unified components) in the reasoning system. It processes an input prompt and generates final outputs, which often include rationales or reasoning steps. As an LLM, it has the capability to produce diverse rationales through sampling—a key property utilized by many advanced reasoning techniques. Importantly, a standalone LLM operates independently, without interacting with external environments or collaborating with other LLMs. Its decision-making is based solely on simple input-output mappings or through iterative + +![](images/ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg) +Figure 5: Three architecture types used for designing a reasoning system in the context of LLMs. highlights perspectives that the literature emphasizes for customization. + +sampling from the same model, where the prompt incorporates prior reasoning steps (a method known as self-contained reasoning). This self-contained nature allows the LLM to function autonomously while maintaining coherence in its reasoning processes. + +# 2.3.2 From Standalone LLM to Language Agents + +While the concept of an agent has been a long-standing idea in AI (Russell & Norvig, 2010), the notion of language agents has gained prominence alongside recent advancements in LLMs. The key distinction between an agent and a standalone LLM lies in two advanced capabilities: interactivity (Weng, 2023; Yao & Narasimhan, 2023) and autonomy (Xi et al., 2023; Wang et al., 2024d). Interactivity refers to an agent's ability to engage with the external world, including environments or other agents. This capability is crucial because LLMs, while powerful, often have limited knowledge and reasoning abilities confined to their internal memory. 
By enabling interaction with the external world, an LLM can augment its internal knowledge with external information, significantly expanding its understanding and grounding its outputs in real-world observations. Autonomy, on the other hand, refers to an agent's ability not only to follow human instructions but also to independently initiate and execute actions. This capability often involves planning but can extend to more complex behaviors. For instance, a fully autonomous agent should be capable of detecting novel situations, proactively taking initiative, and determining effective interaction strategies without explicit human guidance. These advanced capabilities distinguish LLM-based agents from standalone LLMs, enabling them to operate more dynamically and adaptively in real-world scenarios. + +To delineate the boundary between the agent and its environment, we employ the concept of controllability (Sumers et al., 2024). Specifically, the environment is defined as an external module that the agent cannot modify. For example, a knowledge base containing resources like Wikipedia or a compiler is considered part of the environment because the agent cannot alter it. Similarly, another LLM acting as a judge or verifier is also treated as part of the environment, as its outputs operate independently of the agent. In contrast, + +components like working memory or prompts that the agent can directly modify are not classified as part of the environment. + +In this work, we adopt the perspective of Kapoor et al. (2024), which conceptualizes agentiness as a spectrum. The more interactiveness and autonomy an LLM exhibits, the more agentic it is considered to be. In the upper right of Figure 5, we illustrate this spectrum visually. Within this spectrum, we define a system with agent-environment interaction as a single-agent system and a system that additionally incorporates agent-agent communication as a multi-agent system. 
+ +# 2.3.3 Single-agent Systems + +Given the definitions above, the interaction between the agent and its environment is a central aspect of single-agent systems. These interactions can vary widely in complexity and design. In Figure 5, we illustrate a single-agent system in the bottom left. The focus here is on designing the agent's actions—such as tool use, retrieval, or answer refinement—and obtaining useful perceptions from the environment, which may include feedback from an external verifier or compiler, or data from a knowledge base (KB). This architecture enhances the LLM's capabilities by enabling it to dynamically engage with and adapt to external contexts. + +While a fully autonomous agent should ideally learn to interact with the environment automatically, the literature identifies several predefined interaction patterns (also referred to as workflows (Schluntz & Zhang, 2024)) that have proven effective. We elaborate on these patterns below and, in Sections 3.2 and 5.2, explore specific techniques that leverage them to improve agent performance. + +- Generator-evaluator pattern. This pattern divides the reasoning capability into two distinct components: a generator and an evaluator (e.g., a verifier or other evaluators like compilers). It represents a natural extension of RL-style optimization and has gained popularity since the introduction of RLHF (Ouyang et al., 2022). In this setup, the evaluator functions as the environment, providing feedback on the quality of the agent's actions. Such feedback is particularly valuable for guiding the search for effective actions and improving decision-making. Recent studies have demonstrated that verifiers can significantly enhance the performance and generalization capabilities of agents (Zhang et al., 2024i; Sun et al., 2024c). However, this pattern is not without its challenges. It can suffer from unreliable components and error propagation. For instance, Kim et al. 
(2024d) point out
The following communication patterns have emerged as effective predefined strategies: + +![](images/9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg) +Figure 6: Inference-time and training-time regimes of a reasoning system. We use tree search as an example to illustrate the inference scaling and trajectories collection. Given a query, inference scaling relies on extensive inference computation to improve the reasoner's distribution. Specifically, it generates multiple candidate reasoning steps at each layer and selects the best solution to proceed (e.g., by using an external verifier or assembling). In contrast, learning to reason focuses on collecting trajectories and training from the collected data with minimal inference-time computation. It takes all trajectories in the process (identical to those used in inference-scaling, allowing us to reuse the same tree) and labels them with preferences. The preference data can then be used to train the reasoner. + +- Debate pattern. In this pattern, two or more agents engage in a debate with each other. The term debate can vary in implementation. For example, in (Wang et al., 2024h), it involves agents addressing the problem independently and incorporating other agents' responses as additional advice. In (Liang et al., 2023b), it means agents approach the problem from opposing perspectives. After the debate, a consensus is reached through mechanisms such as an additional judge, weighted voting, or a fixed number of iterations, ultimately determining the collective action to be taken in the environment. +- Reconcile pattern. This pattern facilitates collaborative round-table discussions among agents, enabling them to reach a consensus through mechanisms such as voting or confidence levels. For instance, ReConcile (Chen et al., 2023c) introduce a round-table discussion framework where agents make decisions using a weighted voting system. 
In this process, each agent assigns a confidence level to its proposed answers, and these confidence levels are used as weights to cast votes, ultimately determining the final decision. + +# 2.4 Reasoning Regimes + +Orthogonal to the components and architectures discussed above, reasoning systems can operate under distinct computational regimes. Systems employing inference-time computation can refine their outputs through iterative reflection and revision or search for improved solutions by repeatedly sampling the underlying model. However, such systems must balance cost (e.g., computational resources, latency) and effectiveness (e.g., accuracy, reliability) in achieving correct solutions. The learning-to-reason paradigm addresses this tradeoff by shifting computational burdens from inference to training, learning policies from simulated reasoning processes. While both regimes enhance effectiveness by redistributing computational effort across training and inference, they lack the capacity to dynamically adapt resource allocation or method selection to individual problems—a limitation highlighted in recent work (Sprague et al., 2024a; Kapoor et al., 2024; Chen et al., 2024d). To bridge this gap, emerging approaches within the learning-to-reason framework focus on optimizing the reasoning process itself, jointly minimizing cost and maximizing effectiveness. This involves dynamically allocating computational resources, searching for contextually optimal methods, and training models to synergize with adaptive inference-time strategies. Figure 6 contrasts these regimes, and we elaborate on each in the sections below. + +
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsInstruction engineeringModify instruction by human-design templateParanjape et al. (2021); Zhou et al. (2023b)
Demonstration engineeringDrawing analogy from relevant experienceWei et al. (2022b); Luo et al. (2024d)
Prompt optimizationSearch for optimized prompt (e.g., bootstrap)Xu et al. (2022); Pryzant et al. (2023)
Optimizing OutputGenerating subtasksDecompose the original task into manageable subtasksDua et al. (2022); Zhou et al. (2023a)
Exploration and searchBranch and explore multiple paths to optimize reasoning trajectoriesYao et al. (2023a); Besta et al. (2024)
+ +Table 2: Summary of inference scaling with standalone LLM. + +# 2.4.1 Inference Scaling + +Inference scaling techniques enhance reasoning capabilities during test time by increasing the amount of computation performed before generating an answer. These methods can be broadly categorized into three key strategies: (a) Prompt engineering and optimization, which focuses on constructing effective reasoning-provoking prompts through template-based methods, human curation, and automated optimization. (b) Search and planning methods, which include task decomposition, plan generation and verification, and exploration-based approaches. They enable structured multi-step reasoning, often involving backtracking within trees or graphs, to systematically explore potential solutions and verify their validity. (c) System-level enhancements, which incorporates external tools, knowledge sources, and verification mechanisms to augment the model's reasoning capabilities. For standalone LLMs, inference scaling primarily revolves around prompt construction and search strategies. In multi-agent settings, it further extends to include agent-agent communication and coordinated action strategies, enabling collaborative problem-solving. While these techniques have demonstrated significant effectiveness in improving reasoning performance without requiring updates to model parameters, they often come with increased computational costs during inference. + +# 2.4.2 Learning to Reason + +This regime shifts the focus to training models to reason effectively before deployment, often referred to as training-time methods. The core idea is to simulate inference, generating trajectories that capture potential reasoning paths. These trajectories are then used to train the reasoner with online or offline learning methods. The methods include supervised and/or reinforcement learning. 
While learning-to-reason typically minimizes computational costs during inference, it incurs higher costs during simulation and training. In Section 5, we provide a detailed discussion of methods within this regime across different architectures. + +Recently, this paradigm has evolved to incorporate knowledge of both training and testing methods, enabling adaptive strategies. For instance, it now allows for the training of reasoners optimized for known inference techniques (Balashankar et al., 2024), or dynamically distributes computational costs between training and testing, offering a more flexible and efficient framework (Damani et al., 2025; Yue et al., 2025). + +# 3 Improving Reasoning with Inference Scaling + +Compared to small-scale models, pretrained large-scale language models (LLMs) have demonstrated emergent capabilities (Wei et al., 2022a), such as in-context learning (Dong et al., 2024) and role-playing (Shanahan et al., 2023a), which manifest without additional fine-tuning (i.e., without any gradient updates). Arguably, many of these abilities become apparent only after reaching a certain scale in model size. While scaling model parameters has been shown to improve reasoning performance across various tasks, the returns have diminished due to the high cost of training increasingly larger models. As a result, inference scaling has emerged as an appealing and orthogonal paradigm to unlock reasoning abilities in LLMs by providing additional test-time compute, allowing them to "think" before producing a final answer. It has been demonstrated that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it offers better generalization through enhanced flexibility in prompt and workflow design. Such deliberate thinking can be enabled either through training (DeepSeek-AI et al., 2025) or by explicit programming at inference time (OpenAI et al., 2024). 
In this section, we focus on the latter and defer training-time methods to Section 5. We begin with inference scaling methods for standalone LLMs and subsequently extend the discussion to single and multi-agent compound systems. + +# 3.1 Inference Scaling With Standalone LLM + +In this section, we examine the core components and techniques that have made inference-time reasoning methods effective. Many of these methods draw inspiration from research on human cognitive processes on planning, problem solving, and decision-making (Newell et al., 1959; 1972; Stanovich & West, 2000). + +# 3.1.1 Constructing Reasoning Provoking Prompts + +Although large-scale pre-training endows LLMs with patterns that support reasoning, these capabilities often remain latent under generic prompts. Liu et al. (2025c) demonstrate that deep-reasoning behaviors—such as reflection and self-verification, which signal profound analytical thought—can be amplified simply by increasing the sampling budget. This highlights the importance of designing prompts that deliberately provoke reasoning, thereby surfacing and leveraging the latent human priors within LLMs. + +Instruction engineering Enabling LLMs to reason effectively depends heavily on the quality of the instructions provided (Sclar et al., 2024; Zhuo et al., 2024; Long et al., 2024a). Recognizing this, numerous prompt engineering studies aim to improve LLM reasoning by enhancing instructions. Extensive efforts in this direction primarily focus on template-based and human-curated instructions (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022; Si et al., 2023; Long et al., 2024b). With LLMs becoming increasingly adept at following human instructions and generating human-like text, focus has shifted toward leveraging the models themselves to craft and refine high-quality instructions. A notable example of this shift is the Automatic Prompt Engineer (APE) introduced by Zhou et al. 
(2023b), which uses LLMs to generate high-quality instructions, achieving performance comparable to or surpassing that of human annotators on 31 reasoning tasks. Furthermore, other studies have proposed methods to modify instructions for improved reasoning. For instance, Deng et al. (2023a) and Mekala et al. (2024) present Rephrase-and-Response and EchoPrompt, respectively, two simple yet effective strategies where LLMs are instructed to rephrase queries before answering, significantly enhancing LLM performance on reasoning tasks. Similarly, Tian et al. (2023) introduce R3 prompting, which instructs LLMs to first extract key sentences from noisy contexts, then rephrase the instruction to explicitly include extracted sentences. + +Demonstration engineering Humans can address new problems by drawing analogy from relevant past experience (Holyoak, 2012). Inspired by this, Yasunaga et al. (2024) propose analogical prompting to guide LLMs to self-generate exemplars or knowledge relevant to the given problem as few-shot demonstrations for reasoning, outperforming hand-crafted or retrieved examples. For example, LLMs are prompted to generate a problem on calculating a third-order determinant before solving the given fourth-order determinant. Similarly, Chen et al. (2023d); Yang et al. (2023a); Luo et al. (2024a) highlight the effectiveness of self-generated relevant exemplars. Qin et al. (2025) further systematically assess the capability of LLMs to perform analogical reasoning and find that performance is not primarily determined by whether the exemplars are topically relevant to the task. Instead, they show that even exemplars from unrelated domains, such as self-generated biological exemplars, can lead to improved performance, as long as they are accurate and structurally aligned with the reasoning steps required by the target task. 
This highlights that the quality of the exemplar (its correctness, clarity, and structural usefulness for reasoning) can be the key limiting factor, rather than its relevance to the topic domain.
(2025) recently introduce MAPLE, a novel influence-based many-shot ICL framework that identifies impactful unlabeled samples, pseudo-labels them by querying LLMs, and adaptively selects them for each test query. This approach effectively enhances many-shot ICL performance with minimal labeling cost, demonstrating improved adaptability and reasoning capabilities of LLMs. + +Prompt optimization Prompt optimization methods, aiming to systematically and strategically optimize prompts for improved performance, have been extensively explored for enhancing LLM reasoning. For instance, Xu et al. (2022) introduce Genetic Prompt Search (GPS), leveraging genetic algorithms to search for the best instruction. Similarly, Guo et al. (2024a) and Fernando et al. (2024) employ evolutionary algorithms to iteratively refine instructions, while Long et al. (2024c) introduce a minimax-game framework, inspired by Generative Adversarial Networks (Goodfellow et al., 2014) to simultaneously optimize instructions and demonstrations. Furthermore, Pryzant et al. (2023) present the concept of "text gradients" which leverage feedback from prompt executions and LLMs to update prompts, akin to Optimization by PROempting (OPRO) (Yang et al., 2024c), which uses execution feedback. Despite these advances, the interplay between various prompt optimization algorithms remains underexplored. Recently, Wan et al. (2024a) conducted a comprehensive evaluation of representative techniques for instruction and demonstration optimization, examining their effectiveness in isolation and combination across a range of challenging tasks. Their findings indicate that intelligently reusing samples from prompt evaluations as demonstrations consistently enhances performance, that demonstration selection strategies can have a greater impact than instruction optimization techniques, and that a synergistic combination of demonstration and instruction optimization can outperform their individual contributions. 
+ +# 3.1.2 Optimizing Reasoning Output with Search and Planning + +Generating reasoning subtasks Human problem-solving often involves planning manageable steps that lead to a successful resolution (Dostál, 2015). Likewise, improving LLM reasoning by breaking down complex problems into intermediate steps has become a successful paradigm. In this context, subtasks refer to the decomposed parts of a problem, structures are the frameworks guiding the reasoning process, and intermediate steps are intermediate results produced at each stage of problem-solving. Nye et al. (2021) and Wei et al. (2022b) pioneer this direction by proposing Chain-of-Thought (CoT) prompting which uses a few demonstrations with human-written intermediate steps to guide the model in solving complex problems in a similar style. Kojima et al. (2022) further simplified this approach by introducing zero-shot CoT prompting, which eliminates the need for demonstrations by instructing models to "think step by step" before answering. + +Simple CoT prompting often struggles as task complexity increases, particularly when the task surpasses the complexity of the provided demonstrations. To address this, researchers have proposed methods that explicitly guide models in decomposing tasks into subtasks, thereby enhancing intermediate step reasoning. Dua et al. (2022) propose an iterative approach, where tasks are progressively broken down into simpler subtasks and solved step-by-step. Similarly, Zhou et al. (2023a); Khot et al. (2023) and Suzgun & Kalai (2024a) advocate for a "divide-and-conquer" strategy, where tasks are first divided into subtasks and then solved sequentially. + +Beyond subtasks, researchers emphasize the importance of robust reasoning structures such as hierarchical and decision-making processes that capture the underlying mechanisms involved in problem-solving. Zhou et al. 
(2024b) introduce Self-Discover, a framework that enables models to self-identify reasoning structures for any task using a seed set of general reasoning skill modules. Building on this, Aswani et al. (2024) propose Auto-Evolve, which dynamically adapts reasoning modules to accommodate more diverse problems. In addition to designing better reasoning steps, several studies address the need to correct intermediate + +
PerspectiveMethodCharacteristicRepresentative Work
Feedback RefinementVerifier and ReflectionUse verifiers to select, modify, or refine actionsSnell et al. (2025); Madaan et al. (2023b)
Action EnhancementRetrieval and ToolAccess external knowledge and specialized resourcesLi et al. (2024e); Ma et al. (2024a)
+ +Table 3: Summary of inference scaling with single-agent system + +steps. For example, Deng et al. (2024a); Yan et al. (2024) and Wu et al. (2024b) propose methods to refine intermediate outputs. Notably, Zhang et al. (2024i) observe that smaller models ( $\leq 13\mathrm{B}$ parameters) in particular need stronger models acting as verifiers to validate and correct intermediate steps. + +Exploration and search Research on human problem-solving reveals that complex reasoning tasks often admit multiple valid paths to reach a correct solution (Stanovich & West, 2000). Compared to linear reasoning structures like chain-of-thought, approaches that incorporate exploration during problem-solving have shown significant improvements for complex reasoning tasks. Unlike task decomposition methods (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023), exploration-based approaches employ dynamic search through multiple possible reasoning paths simultaneously rather than following certain decomposition patterns, enabling models to explore ambiguous solution strategies for complex problems. Exploration typically involves two key components: branching and aggregation. Due to the stochastic nature of language model decoding, branching is often implemented through independent re-sampling with non-zero temperature, generating diverse reasoning chains. Early methods, such as self-consistency (Wang et al., 2023f), introduced branching only at the beginning of the reasoning chain, conditioned on the initial query. While simple, this approach lacks local exploration of intermediate reasoning steps, has limited applicability for tasks with multiple valid answers, and produces reasoning chains with restricted diversity (Chen et al., 2024d). 
More recent advancements, such as Tree-of-Thoughts (Yao et al., 2023a), Graph-of-Thoughts (Besta et al., 2024), and Forest-of-Thoughts (Bi et al., 2024), enable finer-grained branching by considering both the query and a history of previous thoughts or thought-state sequences, allowing for more nuanced and flexible exploration. + +The effectiveness of branched reasoning paths with thoughts or answers depends on aggregation or evaluation strategies. Recent progress is centered around two categories: ensemble-based methods and verifier-based methods. Ensemble-based methods have been widely employed due to their simplicity and self-contained nature, requiring no external knowledge or sources for validation. These approaches typically employ strategies such as majority voting across answer tokens (Wang et al., 2023f; 2024a; Li et al., 2024b) or confidence-based selection (Wang & Zhou, 2024). Verifier-based methods, in contrast, employ external verifiers or judges to score and select preferred answers among candidate solutions. + +# 3.2 Inference Scaling With Single-agent System + +LLMs are trained on static, finite datasets, which inherently limits their parametric knowledge. This limitation hinders their ability to reason effectively in scenarios requiring up-to-date or highly specialized knowledge. The use of an agentic system, where LLMs are augmented with external verifiers, retrieval and tool integration, has proven effective in such scenarios. Verifiers provide reasoners with a signal of the quality of their outputs (e.g., a score or natural language feedback), which may be used by reasoners to modify or improve their outputs. Retrieval augmentation improves reasoning by enabling the agent to access relevant external knowledge, thereby reducing hallucinations and ensuring more accurate, fact-based responses. Additionally, the agent can achieve higher performance by leveraging specialized external tools to handle specific intermediate reasoning steps. 
For instance, allowing an agent to use a calculator can minimize errors stemming from inaccuracies in numerical generation. + +A pioneering approach in this domain is the ReAct framework (Yao et al., 2023b), which interleaves reasoning and acting by prompting LLMs to generate both reasoning traces and task-specific actions in an interleaved manner. This synergy allows the model to induce, track, and update action plans while interfacing with external sources (environment) to gather additional information. ReAct has demonstrated effectiveness across QA and interactive decision-making tasks. Building upon ReAct, LATS (Zhou et al., 2024a) unifies reasoning, acting, and planning within LLMs. By combining Monte Carlo Tree Search with ReAct, LATS enables structured search over a combinatorial space of reasoning and acting paths. More recently, Liu et al. + +(2024f) formalize reasoning and acting with LLMs under a Bayesian adaptive MDP and propose RAFA, a theoretically grounded framework for orchestrating the reasoning and acting of LLMs. + +# 3.2.1 Refinement with Verifiers and Reflections + +A natural basis for modifying agent actions is the quality of their generated outputs—if the output is incorrect, the agent should attempt to correct it. However, ground-truth references are typically unavailable to the agent at test time. In such scenarios, agents often rely on verifiers, which are models or systems that provide an approximate measure of correctness, to guide action modifications. A special case arises when the verifier has access to ground-truth outcomes. Oracle verifiers (First et al., 2023; Xin et al., 2024a), which leverage correct answers, have shown significant performance improvements over baselines without verifiers (Huang et al., 2024a; Brown et al., 2024). However, their applicability is limited to scenarios where ground-truth data is readily available or easily accessible, such as in games or structured environments. 
+ +In contrast, non-oracle (or imperfect) verifiers provide a more widely applicable solution. Their form varies depending on the task and knowledge source. For instance, Cobbe et al. (2021); Feng et al. (2023b); Snell et al. (2025) employ trained outcome reward models (ORMs) as verifiers to rerank responses. For more granular evaluation, Lightman et al. (2024) and Zhang et al. (2025b) train process reward models (PRMs) to serve as inference-time verifiers. By enabling the reward model to assess each reasoning step individually, PRMs generally yield greater improvements during inference compared to ORMs (Uesato et al., 2022; Tian et al., 2024). + +While reward models provide actionable signals about the quality of model responses, they are non-generative verifiers. As a result, they are unsuitable for verification approaches that require natural language feedback. For instance, synthesizing unit tests (Chen et al., 2023b; Hassid et al., 2024; Kapoor et al., 2024; Cook et al., 2024), commonly used in code generation tasks, necessitates verifiers capable of generating natural language. Broadly, generative verifiers are referred to as either critique models or LLM-as-judge models. In both cases, LLMs are either prompted or fine-tuned specifically for critique and evaluation. These models have been employed not only for output reranking (Vu et al., 2024) but also for providing valuable natural language feedback (Shinn et al., 2024; Shridhar et al., 2024; McAleese et al., 2024). However, recent studies have found that LLM-as-judge models generally underperform reward models (RMs) in terms of verification (Zhang et al., 2024e). To address this, researchers have sought to combine the strengths of both approaches under the Generative RM framework (Zhang et al., 2024e; Mahan et al., 2024; Liu et al., 2025b), aiming to unify the advantages of generative feedback with the precision of reward-based evaluation. 
+ +Self-reflection or self-refinement approaches (Saunders et al., 2022; Madaan et al., 2024) aim to eliminate the need for additional, specialized verifier models by enabling the agent to critique and refine its own outputs. While some studies (Saunders et al., 2022; Madaan et al., 2024) have demonstrated empirical success, others highlight poor performance in the absence of robust verifiers (Stechly et al., 2023; Huang et al., 2024a; Stechly et al., 2024; Valmeekam et al., 2023; Shridhar et al., 2024). For a comprehensive review of recent advancements, see (Pan et al., 2024b). + +While verification methods can be deployed across a wider range of domains, they are susceptible to false positives—incorrect solutions that nevertheless pass verification. This limitation becomes particularly relevant when scaling up inference compute, as it can lead to diminishing returns on computational investment. Interested readers can refer to (Stroebl et al., 2024) for a comprehensive analysis of these trade-offs. + +# 3.2.2 Enhancement through Retrieval and Tool Utilization + +During the reasoning process, agents can retrieve external knowledge to refine their internal state representations, resulting in more accurate reasoning steps. The advantages of retrieval are particularly pronounced in knowledge-intensive tasks that demand multi-hop and long-horizon reasoning, where connecting multiple pieces of information is essential to arrive at a final answer. Through retrieval, agents can access intermediate information, verify connections between data points, and integrate them into their reasoning process (Shi et al., 2024; Jiang et al., 2024b; Wang et al., 2024m). Retrieval also addresses critical flaws in LLMs, such as hallucination and factual inaccuracies. By grounding responses in retrieved facts, models are less prone to generating erroneous information and more likely to produce reliable and trustworthy outputs. For + +
PerspectiveMethodCharacteristicRepresentative Work
DesigningDecentralizedNo hierarchy among agentsChen et al. (2023c); Chang (2024)
CommunicationCentralizedPresence of a central lead agentSuzgun & Kalai (2024a); Pan et al. (2024a)
ActionConditioned generationPerform reasoning based on other agents' outputsWang et al. (2024c); Gao et al. (2024b)
CoordinationDynamic adaptationAdapt actions based on specific tasksFourney et al. (2024); Yuan et al. (2024c)
+ +Table 4: Summary of inference scaling in multi-agent systems. + +instance, frameworks such as Verify-and-Edit (Zhao et al., 2023) and Chain-of-Knowledge (Li et al., 2024e) dynamically incorporate structured and unstructured knowledge sources to revise and correct intermediate reasoning steps within a reasoning chain. CRP-RAG (Xu et al., 2024b) improves multi-hop reasoning by dynamically adjusting reasoning paths and aggregating relevant knowledge. SelfRewardRAG (Hammane et al., 2024) enhances medical reasoning by combining RAG with self-evaluation, dynamically retrieving and synthesizing up-to-date medical information to ensure accurate response generation. By leveraging real-time data, such as clinical records from PubMed, it ensures responses are both current and precise. Another example is Think-on-Graph (Sun et al., 2023), a retrieval framework that integrates knowledge graphs (KGs) and text retrieval to deepen and refine reasoning in LLMs. GRATR (Zhu et al., 2024b) applies RAG techniques to enhance reasoning in multiplayer games with incomplete information. + +In addition to search and retrieval, agents can utilize other specialized tools to overcome their inherent limitations and significantly enhance reasoning performance. By integrating tools such as calculators, compilers, calendars, or specialized APIs, agents can access domain-specific resources, enabling them to operate more effectively in targeted applications (Yu et al., 2023b; Lu et al., 2024a; Li et al., 2025a). For instance, SCIAGENT (Ma et al., 2024b) leverages domain-specific tools like SymPy and WolframAlpha to enhance the reasoning capabilities of LLMs in scientific domains. Similarly, FinAgent (Zhang et al., 2024g) combines textual, numerical, and visual tools to improve performance in financial trading tasks. 
+ +Moreover, external tools provide precise computational capabilities, allowing LLMs to transcend their limitations and perform complex numerical tasks with higher accuracy (Chen et al., 2023e; Li et al., 2023a). For example, MATHSENSEI (Das et al., 2024) employs tools such as Python, WolframAlpha, and Bing Search to tackle mathematical reasoning tasks across disciplines like algebra and calculus. TART (Lu et al., 2024b) integrates LLMs with tools for precise table-based reasoning tasks, such as table question answering and fact verification. + +Moreover, Anthropic introduced an open standard of Model Context Protocol (MCP) to seamlessly connect AI assistants with real-world data sources such as content repositories, business tools, and development environments. It provides a universal, scalable way for developers to create secure, two-way connections between AI tools and diverse data systems. While MCP holds significant promise, its adoption also introduces several challenges that must be addressed to support sustainable growth and responsible development. Hou et al. (2025) discussed some key issues, such as the absence of centralized security oversight, gaps in authentication and authorization, and difficulties in maintaining consistency across multi-step, cross-system workflows. + +# 3.3 Inference Scaling With Multi-agent Systems + +By strategically designing communication patterns and coordinating actions, multi-agent systems can achieve more sophisticated reasoning by harnessing the specialized capabilities of multiple agents (Guo et al., 2024b). Effective communication design involves establishing structured message exchanges and interaction patterns among agents, while action coordination focuses on reconciling diverse outputs and achieving consensus to determine the final action in the environment. 
+ +# 3.3.1 Designing Communication Patterns + +A common communication pattern in multi-agent frameworks involves engaging multiple agents in debates or discussions (Liang et al., 2023b). For instance, the RECONCILE framework (Chen et al., 2023c) requires each agent to generate an answer accompanied by an explanation and a confidence score. The agents then participate in multi-round discussions to refine their responses, and a confidence-weighted voting mechanism + +aggregates the answers into a consensus. Similarly, SocraSynth (Chang, 2024) employs opposing LLM agents moderated by predefined contentiousness levels to explore diverse perspectives. Additionally, GroupDebate (Liu et al., 2024e) organizes agents into groups that conduct internal debates before sharing their results, reducing token costs while maintaining robust logical reasoning capabilities. + +Besides decentralized communication, prior works also consider sending messages to a central node for decision making. For example, Suzgun & Kalai (2024b) employs a language model as a multi-faceted conductor that is good at handling and integrating various queries. Moreover, AgentCoord (Pan et al., 2024a) assigns an LLM the role of a central planner for coordination strategy generation and agent assignment. Compared with decentralized communication, it can lead to more efficient resource allocation but increase the system vulnerability to potential failure of the central node. + +# 3.3.2 Coordinating Action + +Effective action coordination among multiple agents is important for achieving the shared goals, especially given a dynamic and complex environment. Prior works explore various strategies that can enable agents to synergize their actions and optimize overall system reasoning and problem-solving performance. This approach leverages the strengths of different LLMs to overcome the limitations of individual models. 
+ +One straightforward coordination strategy is chaining agents in a row, where agents can perform reasoning based on other agents' outputs. For example, Mixture-of-Agents (MoA) (Wang et al., 2024c) capitalizes on the cooperative nature of LLMs, allowing models to generate higher-quality responses by integrating and synthesizing contributions from multiple agents, achieving state-of-the-art performance. Similarly, Meta-Reasoning Prompting (MRP) (Gao et al., 2024b) assigns each agent to dynamically select the most effective reasoning method from a reasoning pool for a specific task, enabling the integration of diverse strategies to efficiently address multiple tasks. In addition, CoMM (Chen et al., 2024c) makes agents respond to discussions based on different role-playings. + +Moreover, coordination action can incorporate dynamic adaptation to task requirements. For example, Magentic-One (Fourney et al., 2024) introduces a lead agent as Orchestrator to conduct dynamic planning based on varied tasks. Gabriel et al. (2024) proposes a framework that deals with multi-hop queries, produces and executes task graphs, chooses suitable tools, and dynamically adapts to real-time changes. Additionally, EVOAGENT (Yuan et al., 2024c) dynamically generates various agents suitable for the given task and select those with high-quality outputs for result generation. + +# 4 Learning Algorithms + +Before delving into methodologies for training reasoning models, we first describe the foundational learning algorithms used to train the reasoner's policy and verifiers. These algorithms are defined by their precise loss functions. Note that learning algorithms are independent of the data curation process, which will be discussed in detail in Section 5. We begin by presenting commonly used learning algorithms for training reasoning models in Section 4.1, followed by a discussion on training verifiers in Section 4.2. 
+ +# 4.1 Learning of Reasoner + +This section is organized into three key parts: (1) imitation learning through supervised fine-tuning, (2) reinforcement learning, and (3) preference learning. + +# 4.1.1 Imitation Learning - Supervised Fine-tuning + +Supervised fine-tuning (SFT) maximizes the log probabilities of the next token $y_{i}$ given the input prompt $x$ and previously generated tokens $y_{< i}$ . Training the policy model $\pi_{\theta}$ generally includes the steps to minimize the following loss function: + +$$ +L _ {\mathrm {S F T}} (\theta) = \mathbb {E} _ {x, y \sim \mathcal {D}} \left[ \sum_ {i} ^ {T} - \frac {1}{T} \log \left(\pi_ {\theta} \left(y _ {i} \mid y _ {< i}, x\right)\right) \right], \tag {2} +$$ + +where $\mathcal{D}$ is the SFT dataset that comprises inputs $x$ and ground truth labels $y$ . The ground truth labels can be either human-written or AI-generated reasoning process and answer response. The loss is equivalent to the next token prediction objective where the prompt input tokens are masked out and do not contribute to the loss. SFT is often the default first (or only) step to train a base LLM to produce reasoning chains in zero-shot settings. SFT has also been popularly used as an effective way to train smaller LLMs to imitate outputs generated by larger, more powerful LLMs, in a process known as knowledge distillation (Xu et al., 2024c). + +# 4.1.2 Reinforcement Learning for Reasoning + +Stiennon et al. (2020) and Ouyang et al. (2022) pioneered the application of reinforcement learning (RL), particularly proximal policy optimization (PPO) (Schulman et al., 2017), to improve not only reasoning capabilities but also the helpfulness and harmlessness of LLMs. Their work catalyzed a wave of innovations in preference learning and RL-based optimization techniques, as evidenced by subsequent studies (Rafailov et al., 2023; Ahmadian et al., 2024; OpenAI et al., 2024; DeepSeek-AI et al., 2025; Ramesh et al., 2024). + +Markov decision process. 
Most reinforcement learning (RL) approaches model text generation as a Markov Decision Process (MDP). In this framework, the process is defined by the following components: + +- A set of states $S$ , +- A set of actions $\mathcal{A}$ , +- A state-action transition distribution $P(s_{t + 1}|s_t,a_t)$ controlled by the environment, +- A reward function $R(s_{t},a_{t})\in \mathbb{R}$ that provides a scalar reward, and +- A policy $\pi (a_t|s_t)$ , which determines the actions to take based on the current state. + +At each time step $t$ , for a given state $s_t \in S$ , the agent selects an action $a_t$ and transitions to a new state $s_{t+1}$ , receiving a reward $R(s_t, a_t)$ from the environment. The set of available actions at state $s_t$ may be restricted to a subset of $\mathcal{A}$ , denoted $\mathcal{A}_{s_t}$ (i.e., $a_t \in \mathcal{A}_{s_t}$ ). In the context of autoregressive language modeling with LLMs, generally the next token depends on all the previous tokens. As such, in order to apply RL training for LLMs, one needs to define the states and actions of the problem such that they both satisfy the temporal dependency constraint of the language modeling task as well as the Markov property. One common approach is to define that the current state $s_t$ fully encapsulates all relevant information about the environment, in other words all previous tokens. This means the next state $s_{t+1}$ depends solely on the current state $s_t \in S$ and the chosen action $a_t \in \mathcal{A}_{s_t}$ . In this way, the current state no longer needs to retrieve information from the previous states to decide the next action. As such, the state transition is agnostic to the history or previous states and actions. Within this MDP framework, the goal of RL is to learn a policy model that selects optimal actions by maximizing the expected cumulative rewards (Eq. 1). 
+ +- Action := token: Actions are defined at the token level, making the action space $\mathcal{A}_{s_t}$ is finite and equal in size to the vocabulary. The state $s_t$ consists of all preceding tokens, including the input prompt and previously generated output tokens. The next state $s_{t+1}$ is defined as the concatenation of the current state $s_t$ and the action taken $a_t$ , i.e., $s_{t+1} \coloneqq [s_t; a_t]$ . This category of methods defines rewards and related measures, such as values and advantages, at the token level. Works adopting this approach include most standard RLHF methods (Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023) as well as more recent fine-grained process-rewarding approaches (Yuan et al., 2024b; Cui et al., 2025). +- **Action := token chunk (step):** In this category of methods, actions are defined at the level of token chunks that semantically represent a reasoning step, separated by a special delimiter. As a result, the action space is infinite. The state $s_t$ consists of the prompt and the output tokens generated in previous reasoning steps. Rewards, value scores, and advantages are computed at the step level, with all tokens within a reasoning step $a_t$ sharing the same step-level score. This approach is particularly prominent in process supervision pipelines, as exemplified by DeepSeek-Math and VinePPO (Shao et al., 2024; Kazemnejad et al., 2024). + +
TypeState stAction atAction spaceExample work
Action := tokenAll previous to-kens (prompt and current response tokens)one tokenfinite, vocabulary size(Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023)
Action := stepAll previous tokens of prompt and previous stepsa chunk of tokens representing a “reasoning step”, separated by a special delimiterinfinite(Shao et al., 2024) (process supervision), (Kazemnejad et al., 2024)
Action := full re-sponsePromptentire responseinfinite(Shao et al., 2024) (outcome supervision), (DeepSeek-AI et al., 2025)
+ +Table 5: Definitions of MDP states and actions across different training schemes. + +- Action := full response: In this category, the entire response—comprising all output tokens—is treated as a single action. This transforms the reasoning problem into a one-step MDP with an infinite action space. This approach has been recently popularized by DeepSeek-R1 (DeepSeek-AI et al., 2025) and previously by DeepSeek-Math (outcome supervision) (Shao et al., 2024). A unique aspect of this formulation is that the full response may semantically include multiple reasoning steps, such as spontaneous backtracking and self-evaluation behaviors, as observed in DeepSeek-R1 (DeepSeek-AI et al., 2025). Regardless of the number of humanly recognizable reasoning steps within the response, the entire output is still considered a single action. To assign token-level value scores, rewards, and advantages, Shao et al. (2024); DeepSeek-AI et al. (2025) compute these values based on the full response $a_{t}$ and then distribute them uniformly across all tokens, similar to the step-level action setting. This formulation aligns with the concept of "bandit" prediction (with infinite action space) in REINFORCE-style RL (Nguyen et al., 2017; Kreutzer et al., 2017). + +Proximal Policy Optimization (PPO). As one of the primary variants of policy gradient methods, PPO has remained a popular and widely used RL algorithm (Schulman et al., 2017). To train the policy $\pi_{\theta}$ , PPO utilizes two additional models: the reference model $\pi_{\theta_{\mathrm{ref}}}$ , which represents the initial state of the policy, and the value model $V$ , which estimates the state value $V(s_{t})$ . PPO begins by sampling a state-action trajectory $\tau$ with consecutive state-action pairs $s_{t+1} \sim (s_{t}, a_{t})$ , then collects the respective intermediate or process reward (if available) and final (outcome) reward. 
Then, it computes the advantage $A(s_{t}, a_{t})$ of each action $a_{t}$ given the current state $s_{t}$ , which is defined as the relative strength of that specific action $a_{t}$ compared to the probability-weighted actions that the policy could probably have taken from $s_{t}$ . The advantage is formulated as + +$$ +A \left(s _ {t}, a _ {t}\right) := Q \left(s _ {t}, a _ {t}\right) - V \left(s _ {t}\right) := Q \left(s _ {t}, a _ {t}\right) - \mathbb {E} _ {a _ {t} ^ {\prime}} \left[ Q \left(s _ {t}, a _ {t} ^ {\prime}\right) \right], \tag {3} +$$ + +where $Q(s_{t},a_{t})$ represents the expected cumulative total reward that the policy is expected to obtain if it takes action $a_{t}$ from $s_{t}$ and continue to follow the current policy, while $V(s_{t})$ denotes the expected total rewards obtainable from state $s_{t}$ , known as the state value. The state value is equivalent to the expected value of $Q(s_{t},a_{t}^{\prime})$ marginalized over all probable actions the current policy $\pi_{\theta}$ may take from $s_{t}$ . If $A(s_{t},a_{t}) > 0$ , the action $a_{t}$ is encouraged, conversely, if $A(s_{t},a_{t}) < 0$ , the action $a_{t}$ is discouraged. After computing the advantages, PPO optimizes the policy $\pi_{\theta}$ according to the following loss function. + +$$ +L _ {\mathrm {P P O}} (\theta) = \mathbb {E} _ {\tau \sim \pi_ {\theta_ {0}}, P} - \frac {1}{T} \left[ \sum_ {t = 0} ^ {T} \operatorname {m i n} \left(\frac {\pi_ {\theta} (a _ {t} | s _ {t})}{\pi_ {\theta_ {o}} (a _ {t} | s _ {t})} A (s _ {t}, a _ {t}), \operatorname {c l i p} \left(\frac {\pi_ {\theta} (a _ {t} | s _ {t})}{\pi_ {\theta_ {o}} (a _ {t} | s _ {t})}, 1 - \epsilon , 1 + \epsilon\right) A (s _ {t}, a _ {t})\right) \right], \tag {4} +$$ + +where $t \in [0, T]$ is a time step within trajectory $\tau$ , $\pi_{\theta_o}$ is the fixed policy from previous episode or iteration, and $P$ is the transition distribution. 
The clip function, applied to the probability ratio $\frac{\pi_{\theta}(a_t|s_t)}{\pi_{\theta_o}(a_t|s_t)}$ , ensures that the policy does not deviate too drastically or rapidly from its previous version. This also helps prevent catastrophic failure or suboptimal local solutions. Additionally, a KL divergence term $\mathcal{D}_{\mathrm{KL}}(\pi_{\theta}||\pi_{\theta_{\mathrm{ref}}})$ is often incorporated into the loss function to constrain exploration during the later stages of training. $\pi_{\theta_{\mathrm{ref}}}$ is often a fixed initial reference policy that we do not want our policy to deviate too much from, while $\pi_{\theta_o}$ is a snapshot of the current policy from the previous iteration which is updated regularly. Throughout the training process, both the policy $\pi_{\theta}$ and value model $V$ are iteratively updated. + +REINFORCE & RLOO. REINFORCE is another popular policy gradient method (Sutton, 2018; Williams, 1992; Nguyen et al., 2017; Kreutzer et al., 2017) for RL. This method seeks to optimize the reward weighted objective of the entire response as: + +$$ +L _ {\mathrm {R E I N F O R C E}} (\theta) = \mathbb {E} _ {x \sim \mathcal {D}, y \sim \pi_ {\theta} (\cdot | x)} [ (R (y, x) - b) \nabla_ {\pi_ {\theta}} \log \pi_ {\theta} (y | x) ] \qquad (5) +$$ + +where $R(y, x)$ represents the final reward for output $y$ given input $x$ and $b$ is a baseline term introduced to reduce the variance of the gradient estimates. A widely used choice for $b$ is the moving average of all rewards observed during training (Williams, 1992; Ahmadian et al., 2024). + +Recently, the REINFORCE Leave-One-Out (RLOO) method (Kool et al., 2019; Ahmadian et al., 2024) has been proposed, which replaces the traditional baseline calculation with the leave-one-out average of trajectory rewards obtained through Monte Carlo (MC) sampling, as shown in Eq. 
6 + +$$ +L _ {\mathrm {R L O O}} (\theta) = \frac {1}{k} \sum_ {i = 1} ^ {k} [ R (y _ {i}, x) - \frac {1}{k - 1} \sum_ {j \neq i} R (y _ {j}, x) ] \nabla_ {\pi_ {\theta}} \log \pi_ {\theta} (y _ {i} | x) \tag {6} +$$ + +where $k$ denotes the number of Monte Carlo samples. Unlike PPO, these algorithms do not rely on a parameterized value function (critic model) and instead depend solely on observed rewards. These methods share similarities with approaches such as Group-Relative Policy Optimization (GRPO) (Ramesh et al., 2024) and VinePPO (Kazemnejad et al., 2024), which will be discussed in detail below. + +Group-Relative Policy Optimization (GRPO). This algorithm has gained recent popularity through DeepSeek-R1 DeepSeek-AI et al. (2025), though it was also explored in earlier studies such as (Shao et al., 2024; Yang et al., 2024b;a; Team, 2024). It employs the same clipped surrogate objective as PPO, defined in Eq. 4 (Schulman et al., 2017). However, unlike PPO, which uses a parameterized value model to estimate the advantage $A(s_{t},a_{t})$ , this approach samples a group $G = [o_{1},o_{2},\dots,o_{g}]$ of Monte-Carlo outputs for a given input $x$ . It then computes the corresponding rewards $R = [r_1,r_2,\dots,r_g]$ , and determines the advantage of each output $o_i$ as the group-normalized reward + +$$ +A _ {\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}) = A _ {\mathrm {G R P O}} (o _ {i}) = \frac {r _ {i} - m e a n (R)}{s t d (R)}. \tag {7} +$$ + +Then, the algorithm optimizes the policy $\pi_{\theta}$ by minimizing the following loss function. + +$$ +\begin{array}{l} L _ {\mathrm {G R P O}} (\theta) = - \frac {1}{| G |} \sum_ {i} ^ {| G |} \frac {1}{T _ {i}} \sum_ {t} ^ {T _ {i}} m i n \left\{\frac {\pi_ {\theta} (a _ {i , t} | s _ {i , t})}{\pi_ {\theta_ {o}} (a _ {i , t} | s _ {i , t})} A _ {\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}), \right. \\ \left. 
\operatorname {c l i p} \left(\frac {\pi_ {\theta} \left(a _ {i , t} \mid s _ {i , t}\right)}{\pi_ {\theta_ {o}} \left(a _ {i , t} \mid s _ {i , t}\right)}, 1 - \epsilon , 1 + \epsilon\right) A _ {\mathrm {G R P O}} \left(s _ {i, t}, a _ {i, t}\right) \right\} \tag {8} \\ \end{array} +$$ + +Variants of GRPO, such as DAPO (Yu et al., 2025), have also been introduced to alleviate issues with GRPO like length bias and inappropriate penalties for responses that exceed the context length. + +# 4.1.3 Preference Learning + +Preference learning, particularly learning from human feedback, is a widely used post-pretraining alignment stage for LLMs. Its goal is to encourage the generation of responses that align with human preferences or + +desired values, such as helpfulness or harmlessness (Ouyang et al., 2022; Bai et al., 2022; Ganguli et al., 2022). The data collection process for this stage typically involves prompting an unaligned LLM to generate multiple responses for a given input. Human annotators are then presented with pairs of responses and asked to select the preferred one. The resulting preference dataset is used to train a reward model. This reward model subsequently provides online reward scores for policy trajectories during PPO training, a process commonly referred to as reinforcement learning from human feedback or RLHF (Schulman et al., 2017; Ouyang et al., 2022; Touvron et al., 2023), as well as AI feedback (Lee et al., 2023). + +Preference learning has evolved beyond conventional reinforcement learning (RL)-based methodologies with the introduction of Direct Preference Optimization (DPO) (Rafailov et al., 2023) and its subsequent variants (Ethayarajh et al., 2024; Lai et al., 2024; Hong et al., 2024; Saeidi et al., 2024; Meng et al., 2024; Azar et al., 2024). DPO proposes using the policy language model itself to directly model human reward preferences from the preference dataset. 
This formulation eliminates the need for a separately trained reward model, instead optimizing the policy on the preference dataset with a simple binary classification loss. Formally, the policy $\pi_{\theta}$ is optimized using a preference dataset $\mathcal{D}$ by minimizing the loss function: + +$$ +L _ {\mathrm {D P O}} (\theta) = - \mathbb {E} _ {(x, y _ {w}, y _ {l}) \sim \mathcal {D}} \left[ \log \sigma \left(\beta \log \frac {\pi_ {\theta} (y _ {w} | x)}{\pi_ {\mathrm {r e f}} (y _ {w} | x)} - \beta \log \frac {\pi_ {\theta} (y _ {l} | x)}{\pi_ {\mathrm {r e f}} (y _ {l} | x)}\right) \right], \tag {9} +$$ + +where $y_{w}$ and $y_{l}$ represent the winning (chosen) and losing (rejected) outputs for input $x$ , respectively. DPO has gained popularity due to its simplicity and stability, bypassing the engineering complexity and challenges associated with PPO-based techniques. However, DPO is not without limitations, such as implicit biases toward longer responses and performance degradation over extended training periods (Ethayarajh et al., 2024; Meng et al., 2024). Subsequent advancements, including KTO (Ethayarajh et al., 2024), iPO (Azar et al., 2024), SimPO (Meng et al., 2024), ORPO (Hong et al., 2024), Step-DPO (Lai et al., 2024), and combination methods (Saeidi et al., 2024), have addressed many of these shortcomings. + +While the above learning algorithms are formulated for single turn input-to-output tasks, it is also generalizable to multi-turn conversations as well as function-calling agentic workflows. In such scenarios, the next state $s_{t+1}$ may not always be a concatenation of all previous states $s_{\leq t}$ and actions $a_{\leq t}$ , but it also depends on incoming response $h_t$ from an outside environment, which can come from a follow-up user instruction or the returned result from a function call. In other words, one may define $s_{t+1} := [s_t; a_t; h_t]$ . 
+ +# 4.2 Learning of Verifiers and Reward Models + +Verifiers play an important role in reasoning systems, improving performance both through training time credit assignment (Ouyang et al., 2022; Ziegler et al., 2019; Stiennon et al., 2020) and inference-time scaling verification (Snell et al., 2024). Reward modeling in the reasoning settings focuses on verifying the correctness of the reasoning chain, rather than evaluating using more general criteria, like helpfulness or safety (Ouyang et al., 2022). As a result, reward model training in reasoning is typically formulated as a binary classification problem between correct and incorrect reasoning steps. Based on label granularity, reward modeling is further categorized into outcome reward modeling (Section 4.2.1) and process reward modeling (Section 4.2.2). More recently, generative models for verification (Section 4.2.3) have emerged as a popular approach that produces actionable and explainable natural language feedback alongside rewards. In this section, we cover common training approaches for verifiers; In Section 6.1.3, we posit that verification itself may benefit from being studied as a reasoning problem itself, highlighting both concrete methods and recent analysis of failure modes in reasoning settings. + +# 4.2.1 Outcome Reward Models (ORM) + +The goal of outcome reward models (ORMs) for reasoning is to provide a scalar reward for a full trajectory. Given a dataset $\mathcal{D}$ of input prompt $x$ and sampled outputs $y$ with corresponding correctness label $c\in \{0,1\}$ , the goal of outcome reward modeling is to train the outcome reward model $r_{\theta}$ using the loss + +$$ +L _ {\text {o r m}} (\theta) = \mathbb {E} _ {x, y \sim \mathcal {D}} [ c \log \sigma (r _ {\theta} (x, y)) + (1 - c) \log (1 - \sigma (r _ {\theta} (x, y))) ], \tag {10} +$$ + +where $\sigma$ is the sigmoid function. Alternatively, one can train ORMs with a pairwise formulation. 
Here, the correctness labels are not explicitly encoded in the loss function, but are used to categorize multiple sampled + +outputs as correct or incorrect. From there, we can form pairs of outputs $\{y_w, y_l\}$ , where $y_w$ reaches the correct outcome (e.g., correct answer for a math problem) and $y_l$ reaches an incorrect outcome. The reward model $r_\theta$ is then typically trained with the Bradley-Terry loss, similar to that in DPO training (Equation 9). + +$$ +L _ {\text {o r m}} (\theta) = - \mathbb {E} _ {x, y _ {w}, y _ {l} \sim D} \left[ \log \left(\sigma \left(r _ {\theta} (x, y _ {w}) - r _ {\theta} (x, y _ {l})\right)\right) \right], \tag {11} +$$ + +Many other pairwise loss functions can be employed, such as hinge loss or other margin-based losses, focal loss, or variations of the Bradley-Terry loss. However, recent work (Liu et al., 2024a) has categorized the impact of loss functions, finding that the typical Bradley-Terry loss yields the best-performing ORM. + +# 4.2.2 Process Reward Models (PRM) + +While outcome reward models are relatively simple to train, outcome-driven verification may encourage incorrect reasoning chains that lead to the correct outcome. As such, recent work has sought to train process reward models (PRMs) to assess correctness for each step in the solution. This requires more fine-grained labels than ORM training. Specifically, assume that for an output $y = (a_{1},\dots ,a_{T})$ , we obtain process-level supervision of the form $c_{1},\ldots ,c_{T}$ , where $c_{t}$ is a binary indicator of step $a_{t}$ correctness. Then, the step-wise cross-entropy loss below is applied. + +$$ +L _ {p r m} (\theta) = \mathbb {E} _ {x, y \sim \mathcal {D}} \left[ - \frac {1}{T} \sum_ {t = 1} ^ {T} \left(c _ {t} \log \sigma \left(r _ {\theta} \left(x, y _ {\leq t}\right)\right) + \left(1 - c _ {t}\right) \log \sigma \left(1 - \sigma \left(r _ {\theta} \left(x, y _ {\leq t}\right)\right)\right) \right] \right. 
\tag {12} +$$ + +Above, $y_{\leq t}$ denotes the output prefix up to and including step $t$ . In practice, collecting step-level annotations $c_t$ can be extremely expensive. As a result, recent work has used variants of Monte Carlo Tree Search to automatically obtain said annotations. Specifically, the annotation for a reasoning step is obtained by rolling out the response until completion from the intermediate step, then using the outcome accuracy as a proxy for correctness (Wang et al., 2024g; Jiao et al., 2024a; Wang et al., 2024k; Dou et al., 2024a; Luo et al., 2024b; Setlur et al., 2024b). As a concrete example, suppose we roll out five completions randomly from the same prefix $y_{\leq t}$ , with three rollouts arriving at the correct answer. Then, the confidence that the prefix $y_{\leq t}$ is correct can be approximated as 0.6. These coarse signals can then be used to train a PRM. These two general approaches to constructing PRM training data have associated pros and cons: Collecting human annotations is expensive, but does not overfit PRM training to one particular policy. MCTS-based approaches yield annotations relatively quickly, but do not generalize beyond the policy from which samples are collected (Zheng et al., 2024; Setlur et al., 2024a). + +# 4.2.3 Generative Verifiers + +ORMs and PRMs are discriminative verifiers, and are therefore unable to generate natural language to support their scores. However, natural language reasoning for evaluations is valuable both as actionable feedback and as an explainable mechanism. As a result, generative verifiers have been proposed to assess responses and provide natural language feedback. Generative verifiers have progressed from prompting frontier LLMs to evaluation-specific finetuning, relying on many of the same learning algorithms presented in Section 4.1. As such, the focus of this section is largely on training data curation. 
+ +Finetuned generative verifiers Generative verifiers are broadly classified as critique models or LLM-as-judge models. Critique models typically take as input a question and model response, and produce a critique with actionable feedback in natural language. The foundation of critique model training is critique training data. To construct training data, intentionally incorrect outputs are sampled from a policy model. Then, these outputs are corrected, usually with stronger model or human annotations. Using such samples, past methods (Wang et al., 2023c; Xi et al., 2024) have employed SFT (Section 4.1.1) to train critique models to imitate critiques. Other methods (Yao et al., 2023c; McAleese et al., 2024) have used used the typical RLHF workflow (Section 4.1.3), first training a reward model to use during PPO training. More recently, outcome-based RL (e.g., GRPO, as presented in Section 4.1.2) has been used for training, relying on either hand-crafted rewards (Akyurek et al., 2023) or execution feedback for code critique (Xie et al., 2025). + +LLM-as-judge models are a more general class of generative verifiers trained to evaluate model responses based on different protocols (pairwise evaluation, 1-5 rating, binary classification). These models rely on preference datasets, either annotated by a strong model or by humans. For example, to train a pairwise LLM-as-judge, one would collect a dataset of paired model responses for a given input prompt, then ask either a human or strong LLM to pick which response is better. Then, natural language explanations are distilled from stronger models, with distilled samples being categorized as correct or incorrect if the preference matches the annotation. From here, earlier LLM-as-judges (e.g., (Li et al., 2023b; Zheng et al., 2023a)) trained with SFT (Section 4.1.1), while newer approaches (Wang et al., 2024f; Hu et al., 2024) have used DPO (Section 4.1.3). 
+ +Discriminative-generative hybrid verifiers Because generation is a more difficult task than classification, generative verifiers have often lagged discriminative reward models in benchmark performance. Recent work (Zhang et al., 2024f; Mahan et al., 2024) has sought to unify the two under the Generative Reward Model umbrella. Here, models use similar datasets to those used to train LLM-as-judge models, but augment the SFT loss with an answer-token loss. Concretely, given a dataset $\mathcal{D}$ with samples comprised of an input $x$ , model response $y$ , and outcome label $c$ (e.g., "Yes"/"No" for correctness), the loss + +$$ +L _ {G e n R M} (\theta) = - \mathbb {E} _ {x, y, c \sim \mathcal {D}} \left[ \log \left(\pi_ {\theta} (c | x, y) \right] \right. \tag {13} +$$ + +is added to the typical language generation losses (e.g., SFT or DPO loss) that are used to train the model to produce natural language explanations. Here, $\pi_{\theta}$ is the generative reward model being trained. + +# 5 Learning to Reason + +In Section 3, we explored various methods for enhancing reasoning through inference-time computation. While these approaches have proven effective in many scenarios, they come with notable limitations, such as constrained improvements in reasoning capabilities (since model parameters remain unchanged) and the requirement for substantial computational resources during inference. With the advent of OpenAI o1 (OpenAI et al., 2024), there has been a growing emphasis on improving reasoning through training-time methods. Recently, Deepseek-R1 (DeepSeek-AI et al., 2025) demonstrated that training-time approaches can achieve reasoning improvements comparable to, or even surpassing, those of inference-scaling methods. Reflecting this trend, this section delves deeper into the role of training in advancing reasoning capabilities. 
+ +Specifically, we explore the data recipe, which focuses on constructing data (reasoning trajectories) tailored for reasoning tasks to facilitate training. At a high level, trajectory collection can be viewed as a form of simulation, where the generator produces reasoning steps—potentially incorporating calls and outputs from external tools—in response to either synthetic or real-world inputs. The primary challenge lies in ensuring that this simulation is both realistic and diverse while simultaneously providing meaningful supervision (reward) throughout the process. Depending on the architecture, as outlined in Section 2.3, this typically involves designing inputs (such as perception in single-agent systems or interaction in multi-agent systems) and outputs (such as actions in single-agent systems or coordination in multi-agent systems). + +Furthermore, we explore the model recipe. Depending on the learning algorithms (Section 4), the model recipe can be 'offline' (non-RL, e.g., SFT and offline RL, e.g. DPO), which focuses on extracting supervision (reward) from the collected trajectories and leveraging them for training. It can also be 'online' (most of RL algorithms, e.g., GRPO and PPO), where there is no need to collect trajectories beforehand, but learning occurs directly on the questions and their rewards. Similar to Section 3, we start with standalone LLMs, detailing how each of their components is trained (Section 5.1). Building on this foundation, we expand the discussion to single-agent systems (Section 5.2) and multi-agent systems (Section 5.3). + +# 5.1 Learning to Reason with Standalone LLM + +This section examines how standalone LLMs can be trained for reasoning tasks. For 'offline' methods, the process typically involves collecting reasoning trajectories, that lead to both correct and incorrect outcomes, followed by further training the LLM on these trajectories. In contrast, for 'online' methods, learning occurs + +
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsQuestion AugmentationExpand knowledge depth and breadth of seed questionsLuo et al. (2023b); Yu et al. (2024c)
Graph-based SynthesisSynthesize prompts guided by structured taxonomyLi et al. (2024a); Tang et al. (2024)
Collecting TrajectoriesRejection SamplingFilter low-quality trajectories from current policyDong et al. (2023)
Special Reasoning PatternImitate human-like reasoning behaviorYuan et al. (2024a); Qin et al. (2024)
Reasoning DistillationDistill reasoning capability from frontier reasoning modelHuang et al. (2024d)
Training from TrajectoriesImitation LearningLearn the behavior directly from the collected trajectoriesYu et al. (2024c)
Preference LearningOptimize preference between pos. and neg. trajectoriesJiao et al. (2024a)
Latent ReasoningCompress trajectory length using implicit reasoning tokensHao et al. (2024b)
+ +Table 6: Summary of learning to reason with standalone LLM. + +directly based on the sampled reasoning chains and their corresponding rewards. While much of the research focus has been on sampling high-quality outputs (i.e., trajectories), methods for generating a robust and diverse set of problems, or model inputs, have also garnered attention. We begin by detailing the process of collecting trajectories, which includes constructing inputs (Section 5.1.1) and obtaining outputs (Section 5.1.2). Subsequently, we describe how the LLM can be trained using the collected trajectories (Section 5.1.3). + +# 5.1.1 Constructing High-quality Prompts for Reasoning + +To effectively drive knowledge distillation and model-seeking, we must curate a diverse collection of high-quality prompts that comprehensively span the target knowledge space. Relying on a narrow or homogeneous prompt set—even when sourced from a strong base model—limits exploration and undermines both distillation and reinforcement learning processes. By contrast, carefully crafted prompts expand the model's exploratory capacity, yielding richer representations and more robust downstream performance. As such, this section covers methods for collecting or synthesizing more challenging prompts. + +Question augmentation A straightforward approach to generating additional inputs is to directly augment existing datasets using frontier LLMs. For example, Xu et al. (2024a) propose using LLMs to "evolve" existing prompt sets, expanding their depth (e.g., more complex instructions) and breadth (e.g., rarer concepts). Yu et al. (2024c) have proposed two main approaches to augment existing questions. One is simply rewriting using frontier LLMs, and the other one is self-verification, which transforms an condition in the question into unknown variable, shows the original answer, and proposes a new question by querying the value of the unknown variable. Luo et al. 
(2023b) adopt a comparable strategy, employing a question generator to iteratively produce both harder and easier versions of a given question, as inspired by the instruction evolution approach of Xu et al. (2024a). The synthesized instructions are further refined using a reward model to ensure quality. + +Knowledge graph-based synthesis Directly augmenting prompts with LLMs can increase the size of the training set but does not inherently enhance diversity. To address this, knowledge graphs—structured taxonomies for organizing reasoning domains—have been utilized to construct input prompts with broader coverage. For instance, Li et al. (2024a) employ a frontier LLM to generate a knowledge graph directly, while Tang et al. (2024) task a frontier LLM with extracting a taxonomy from a seed dataset. These knowledge graphs are then used to progressively synthesize challenging questions, which are subsequently used to prompt larger teacher LLMs, resulting in high-quality instruction-tuning datasets with wider knowledge coverage. Additionally, Jiao et al. (2024b) leverage relation graphs derived from web documents to synthesize pretraining data, improving relation-based logical reasoning capabilities. + +# 5.1.2 Collecting High-quality Reasoning Trajectories + +Beyond constructing high-quality prompts, researchers also refine outputs to collect better trajectories for training. These techniques often sample outputs that follow specific reasoning patterns, such as lengthy reasoning processes with self-reflection, and retain those that meet higher quality standards based on ground-truth labels. Consistent with our architecture definitions in Sec. 2.3, we treat the learned verifier as part of the environment in the agentic system. 
Consequently, this section focuses exclusively on methods that + +utilize existing ground-truth labels—such as answer labels in maths or test cases for code generation—while deferring discussion of methodologies that rely on learned verifiers (reward models or LLM-judges) to Sec. 5.2. + +Rejection sampling Rejection sampling (Dong et al., 2023) aims to select higher-quality samples by repeatedly sampling from the policy model (reasoner). Quality is determined through two primary sources: (1) a learned verifier, which we discuss in Section 5.2, and (2) direct comparison with ground-truth labels (when available), where samples inconsistent with the ground-truth labels are discarded. Yuan et al. (2023) apply this idea to mathematical reasoning, introducing edit distance to ensure diversity among trajectories. Zelikman et al. (2022) propose STaR to incorporate the correct answer into the instruction, prompting LLMs to iteratively refine incorrect reasoning traces and generate higher-quality trajectories. Tong et al. (2024) employ an up-sampling strategy to increase the proportion of successful trajectories for more challenging questions. This approach has become a standard technique for iterative model self-improvement, as demonstrated in works such as (Jiao et al., 2025; Guan et al., 2025; Dou et al., 2024b). + +Encourage special reasoning pattern Another line of research focuses on leveraging human-like reasoning behaviors—such as self-reflection, deep reasoning, and thinking-before-action—to improve reasoning accuracy and reduce hallucinations. One notable approach is Reasoning-as-Planning (RAP) (Hao et al., 2023), which divides reasoning into three steps: thinking, taking action, and observing (inferring) changes in the environment. When applied to text-based reasoning problems, LLMs simulate environment states after taking actions, leading to more accurate reasoning. Building on this idea, Yuan et al. (2024a) and Chen et al. 
(2023a) use frontier LLMs like GPT-3.5 and GPT-4 to synthesize trajectories with this pattern for reasoning problems, facilitating imitation learning. + +Besides, inspired by the success of long and deep reasoning revealed by OpenAI's o1 model, which incorporate self-reflection and search, some researchers propose imitating this process through rule-based synthesis. For instance, Qin et al. (2024) flatten MCTS trajectories, including failed branches, and ask general models to generate bridge sentences for natural transition from the failed nodes to the ones along the successful paths. + +Reasoning distillation Several studies distill reasoning patterns from models capable of producing good reasoning chains (e.g., OpenAI o1) to replicate similar behaviors in smaller models. For example, Huang et al. (2024d), NovaSky Team (2025), Bespoke Labs (2025) and Muennighoff et al. (2025) distill reasoning chains from models like OpenAI-o1, Qwen-QWQ-32B, DeepSeek-R1, and Gemini Thinking Experimental, respectively. Min et al. (2024) diversify this approach by distilling from multiple reasoning models and aggregating outputs into a unified format. + +# 5.1.3 Training from Trajectories + +Using the collected trajectories, training can be conducted by designing the input and output formats for the algorithms discussed in Section 4. + +Supervised Fine-Tuning (SFT) As discussed in Sec. 4.1.1, the most straightforward approach to training reasoning-capable LLMs is to fine-tune a model using SFT on collected trajectories. Methods such as (NovaSky Team, 2025; Bespoke Labs, 2025; Huang et al., 2024d) and (Min et al., 2024) utilize SFT with a modest number of data samples (4K-20K) to replicate the reasoning capabilities of OpenAI's o1 model. Recent SFT approaches have shifted focus to data scaling, with Xu et al. (2025e) exploring the impact of increasing data quantity up to 1 million CoT samples. 
Their findings demonstrate that performance improves with data scale, albeit with diminishing returns. In contrast, Muenighoff et al. (2025) adopt a sample-efficient approach, curating a high-quality 1K-sample reasoning dataset for fine-tuning. They show that this smaller dataset, combined with strategic inference-time prompting, achieves performance comparable to models trained on larger datasets. Similar strategies have been applied in domain-specific reasoning models, such as earlier math reasoning systems Yu et al. (2023a); Yue et al. (2023). + +Preference learning and reinforcement learning While SFT approaches have shown effectiveness, other studies demonstrate that preference learning further enhances performance. Min et al. (2024) study DPO, while Xu et al. (2025e) explore various post-training preference learning methods. Hui et al. (2024), + +Min et al. (2024), and Jiao et al. (2024a) all employ DPO with preference pairs derived from code test cases, outcome correctness, and a PRM trained on automatic supervision, respectively. Another line of work focuses on step-level DPO to optimize reasoning action selection. Specifically, Zhang et al. (2024h) use Tree-of-Thought (Yao et al., 2023a) to estimate outcome rewards and backpropagate them to intermediate nodes for quality assessment. Step-level DPO is then applied to pairs sharing the same trajectory prefix but with contrasting next actions. Lai et al. (2024) directly use GPT-4o to identify the earliest incorrect reasoning step and construct contrastive step-level DPO pairs for preference learning. Yuan et al. (2024d) adopt an iterative DPO approach in a self-rewarding setting, where the policy model itself acts as an LLM-as-judge to progressively improve its capabilities. 
+ +In addition to preference learning, RL with verifiable answer labels also demonstrate importance in improving reasoning, where rule-based rewards by checking the correctness of sampled solutions are employed rather than reward models.6 Lambert et al. (2024) use both math reasoning and instruction following data for outcome-based reinforcement learning without reward models. Deepseek-R1 (DeepSeek-AI et al., 2025) further reveal the potential of pure reinforcement learning with verifiable answers. Yu et al. (2025) provide valuable reproduction of Deepseek-R1 on Qwen2.5-32B, including open-sourced data, code, and technical details about loss function design, reward shaping, and dynamic sampling. + +Training with latent reasoning Typical reasoning models generate long reasoning chains and have demonstrated strong empirical performance. However, this comes at the cost of increased inference time, as they produce lengthy natural language reasoning traces. These traces often contain many tokens that improve the flow and coherence of the output, with only a small fraction directly contributing to the reasoning process. To address this inefficiency, an alternative approach, known as latent reasoning, focuses on representing reasoning trajectories implicitly. This is achieved either by omitting intermediate reasoning tokens entirely or by compressing them into specialized reasoning tokens or continuous vector representations. + +Earlier work in continuous reasoning focused on compressing natural language reasoning chains into a smaller number of tokens. Deng et al. (2023b) employ knowledge distillation to encode the knowledge from natural language reasoning tokens into intermediate representations of the student model. During inference, the model generates only the final answer without producing additional rationale. 
This approach is further refined through curriculum learning (Deng et al., 2024b), which gradually removes reasoning tokens during training to reduce distribution mismatch. + +However, removing all explicit intermediate reasoning tokens may compromise the model's expressivity (i.e., ability to articulate complex reasoning) (Prystawski et al., 2023). A natural trade-off is to retain a limited number of reasoning tokens, making them implicit to enhance expressiveness while preserving performance. Goyal et al. (2024) introduce learnable tokens during pre-training and fine-tuning within standard CoT trajectories, enabling the model to perform additional computation before generating an output token. Wang et al. (2023d) explore various techniques for compressing reasoning steps from training trajectories into a fixed set of planning tokens. At the start of each reasoning step, the model generates a planning token, whose encoded "knowledge" guides the generation of more coherent outputs. Hao et al. (2024b) propose using the last-layer hidden states before the language modeling head as implicit reasoning token representations, feeding these back into the model to generate the next token auto-regressively. These implicit representations are optimized in a stage-wise manner, akin to the approach of Deng et al. (2024b). Xu et al. (2025f) propose an approach for continuous-space reasoning that does not require modifying the LLM reasoner. Specifically, they employ a lightweight fixed assistant model to generate instance-specific soft thought tokens speculatively as the initial chain of thoughts, which are then mapped into the LLM's representation space via a trainable projection module. + +# 5.2 Learning to Reason with Single-agent Systems + +As discussed in Section 2.3, agentic systems enhance the reasoning capabilities of standalone LLMs by incorporating agent-environment interactions. These interactions enable the agent to perceive its environment + +
PerspectiveMethodCharacteristicRepresentative Work
Action-Environment InteractionsIncorporating FeedbackUse environment feedback to filter trajectoriesNi et al. (2024); Xin et al. (2024b)
Training External ModelsTrain models (e.g., to critic) from the interactionWu et al. (2024c)
Search with VerifiersUse verifiers to identify better reasoning trajectoriesWan et al. (2024c)
Distillation from TeacherDistill capability from frontier reasoning modelGou et al. (2024); Ma et al. (2024a)
Training from TrajectoriesSupervised Fine-TuningCollected offline trajectories + learn via SFTDou et al. (2024b); Yin et al. (2024)
Reinforcement LearningLearning directly on questions and their rewardsShao et al. (2024)
Learning with RefinerTrain refiner model to iteratively improve the last-round solution.Xiong et al. (2025)
+ +Table 7: Summary of learning to reason with single-agent systems. + +and accordingly perform actions. This section explores how simulation is achieved through the design of such perceptions and agent actions. It then covers training methods—how agents are trained using these trajectories. Additionally, we discuss how predefined patterns are leveraged when collecting trajectories. + +# 5.2.1 Trajectory Collection through Agent-Environment Interactions + +By interacting with the external world in different ways, agents can effectively construct trajectories that help refine their reasoning process. These interactions to enrich reasoning take the form of (a) incorporating execution feedback, (b) training external models to help reasoning, (c) search with verifiers, and (d) trajectory distillation from stronger teacher agents. + +Incorporating execution feedback Through active interaction with the environment, the agent can obtain valuable feedback for trajectory filtering. Building on STaR (Zelikman et al., 2022) (discussed in Sec. 5.1.2), NExT (Ni et al., 2024) leverages unit tests (Ye et al., 2022) to obtain self-generated rationales that lead to correct solutions for training. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) solve formal theorem-proving problems by generating potential solutions and validating them through interaction with the Lean proof assistant (De Moura et al., 2015), either proving or disproving the solutions. Xin et al. (2024b) further improve DeepSeek-Prover by introducing RMaxTS, an exploration strategy driven by intrinsic rewards to generate diverse proof paths. Furthermore, the agent can integrate environmental information directly into the training process to improve its reasoning capabilities. For example, Cummins et al. (2023) train a 7B model from scratch, achieving significantly improved code optimization performance by leveraging optimizing transformations from external LLVM compilers. 
+ +Training external models The agent can leverage its interaction with the environment to train external models that can in turn help the agent's reasoning. For example, Wu et al. (2024c) train a critic model to identify relatively easier problems for the policy to explore and guide the policy in searching for deeper proof paths. Re-ReST (Dou et al., 2024b) proposes training a refiner to correct the agent's wrong output based on environmental feedback. + +Reasoning search with verifiers Search-based methods address sampling challenges for more difficult problems by leveraging external reward models or generation probabilities to guide decoding. For example, Wan et al. (2024c) develop a Monte Carlo Tree Search (MCTS)-based approach to identify better reasoning trajectories. Each tree node represents either a sentence or token, and a learned LLM-based value function and outcome reward model are used to estimate expected returns during the search process. This method can be applied for both inference-time path selection and training-time imitation learning. + +Guan et al. (2025) rely solely on outcome labels to iteratively update the policy model and a process preference model (PPM) through MCTS. The PPM approximates the Q-value of intermediate reasoning steps. Lai et al. (2024) use an LLM-as-judge to identify the first reasoning step in a sampled trajectory that contains an error. The trajectory up to the error is then used to sample new outputs, and DPO preference pairs are formed from correct and incorrect outputs. Zhang et al. (2024h) focus on unsupervised settings where answer labels are unavailable. Discarded steps collected during the search process are treated as negative actions, contrasting with the steps retained in the final path for DPO training. For multi-step reasoning in dynamic environments, such as web navigation, Putta et al. (2024) propose combining guided MCTS with self-critique to facilitate more effective exploration. 
+ +Trajectory distillation from stronger teacher agents To tackle challenging mathematical problems, Gou et al. (2024) curate interactive tool-use (e.g., code execution) trajectories using GPT-4, derived from existing mathematical datasets across various domains. Similarly, MuMath-Code (Yin et al., 2024) employs multi-perspective data augmentation to generate diverse math questions and synthesizes code-nested solutions using GPT-4. Beyond mathematics, other domains have also been explored. For instance, Ma et al. (2024a) construct a tool-augmented training set for scientific reasoning by prompting GPT-4. CoGEX (Weir et al., 2024) extends LLMs' program synthesis capabilities to tasks that are not easily expressible as code, such as commonsense reasoning and sarcasm understanding. To collect training trajectories, GPT-4 is used to transform the Alpaca dataset (Taori et al., 2023) into the required format. Ke et al. (2025b) explore collecting trajectories from a more capable generative reward model (GPT-4o) to train a finance-expert model by identifying and correcting the first erroneous step in the reasoning process. Additionally, AgentBank (Song et al., 2024) introduces the largest dataset of agent-environment interaction trajectories, comprising 16 tasks across 5 distinct agent skill dimensions. This dataset is created by annotating actions and their corresponding rationales using LLMs of varying scales, addressing key challenges in trajectory collection, such as scalability. + +In addition to leveraging trajectories from GPT-4, Gou et al. (2024) introduce output space shaping by incorporating samples generated by the agent itself. Specifically, they train the agent on both self-sampled correct trajectories and those corrected by a teacher model, promoting diversity in plausible reasoning steps. 
+ +# 5.2.2 Agent Training from Trajectories + +Supervised Fine-Tuning (SFT) After collecting trajectories, many methods apply supervised fine-tuning (SFT) to train the agent, enabling models with little prior experience in agentic environments to adapt quickly. Dou et al. (2024b) enhances agent reasoning by incorporating refiner-corrected samples into the self-training process. NExT (Ni et al., 2024) uses filtered trajectories to train agents for program repair tasks, while Weir et al. (2024) fine-tune agents on collected trajectories to enable the generation and emulation of pseudo-programs. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) iteratively train and refine the policy model using verified proofs, improving performance in theorem proving tasks. Similarly, Gou et al. (2024), Yin et al. (2024), Ma et al. (2024a), and Song et al. (2024) fine-tune agents on agent-environment interaction trajectories generated by proprietary LLMs, enhancing reasoning capabilities across diverse domains. Notably, MuMath-Code (Yin et al., 2024) adopts a two-stage training strategy, first fine-tuning on pure CoT data and then on code-nested data. Chen et al. (2024e) introduce Agent-FLAN, a fine-tuning method designed to improve LLMs' agent capabilities while addressing challenges such as distribution shifts and hallucinations in training data. By redesigning the training corpus and incorporating negative samples, Agent-FLAN enhances both agent-specific and general capabilities of LLMs. + +Reinforcement Learning (RL) Beyond imitation learning through SFT, recent approaches have leveraged reinforcement learning to further enhance reasoning capabilities. Notably, GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025), which employs verifiable outcome rewards during online RL training, has demonstrated strong empirical performance. Havrilla et al. 
(2024) investigate multiple RL algorithms (e.g., Expert Iteration, PPO) for math reasoning tasks, finding that incorporating outcome reward models has negligible effects on performance for both Expert Iteration and PPO. Similarly, Shao et al. (2024) observe relatively minor performance gains when using PRMs during GRPO training. Yang et al. (2024b) explore using a PRM to "shape" outcome rewards by using a linear combination of outcome and PRM rewards for GRPO training. In contrast, Wang et al. (2024g); Luo et al. (2023a); Jiao et al. (2024a) demonstrate that using a trained PRM during PPO training leads to significant performance improvements. Similar gains are observed in the code generation domain (Dai et al., 2024), where the PRM serves both as a reward signal and as an initial checkpoint for the value function during PPO. Zhang et al. (2024a) iteratively train both a PRM and LLM, while Setlur et al. (2024b) provide a new perspective by comparing Q-value-based PRMs with advantage function-based ones, showing improved learning efficiency and performance in guided reinforcement learning. Concurrently, Gao et al. (2024a) address reward hacking (Casper et al., 2023)—where the policy model generates numerous correct but irrelevant reasoning steps to inflate rewards—by implementing clipping and computing relative, step-adjacent rewards. + +
PerspectiveMethodCharacteristicRepresentative Work
Designing CommunicationCentralized communicationUse a centralized controller for information aggregationCanese et al. (2021); Matta et al. (2019)
Conditioned information sharingShare information based on relevancy and privacyHong et al. (2023); Qiu et al. (2024)
Coordinating ActionsLeverage knowledgeUtilize expert knowledge as constraintsLau et al. (2012)
Graph-based methodsUse graphs as structured frameworksRuan et al. (2022); Li et al. (2020)
Hierarchical approachDivide policies to strategy and executionXu et al. (2023)
Training from TrajectoriesTraining data from interactionsObtain high-quality trajectories from interactionsLi et al. (2024c); Estornell et al. (2024)
Gradient modificationModify gradients towards optimal pointsLi et al. (2024f)
+ +Table 8: Summary of learning to reason for multi-agent systems. + +Qiao et al. (2023a) introduce TRICE, a two-stage framework that enables agents to determine when and how to use tools through Reinforcement Learning with Execution Feedback (RLEF) from external tools. Similarly, Xin et al. (2024b) enhance DeepSeek-Prover by incorporating reinforcement learning from proof assistant feedback (RLPAF). To effectively learn from both successful and unsuccessful agent-environment interactions, Putta et al. (2024) develop an off-policy variant of DPO for iterative training. + +Learning with refiner For more challenging questions, models may fail to generate enough successful trajectories to serve as a reliable positive training signal. However, even trajectories with incorrect outcomes can still be leveraged effectively. For example, Qu et al. (2024a) train a correction model using RL to iteratively refine generated model responses. Similarly, Tang et al. (2025) propose a self-evolving framework to train a critique model, which enhances the quality of outputs through continuous feedback. + +Refiner models can also be integrated into the search process to iteratively improve generation quality. For instance, Snell et al. (2024) train a refiner model via RL (Qu et al., 2024b) to refine outputs sequentially. The final prediction is obtained through majority voting over all predictions generated during this iterative refinement process, effectively scaling test-time computation. Xi et al. (2024) develop a step-level critique model that provides feedback for each reasoning step, using training instances collected from GPT-4o. This feedback serves two purposes: (1) expanding training data to improve the actor model, and (2) scaling test-time computation through iterative self-refinement in a multi-agent setup. Zhang et al. 
(2024b) combine reasoning and self-refinement into a single MCTS framework, where each node is either a reasoning node (generating complete reasoning trajectories) or a refining node (identifying and correcting reasoning flaws). A learned pairwise reward model compares the quality of refined and original outputs, estimating the expected returns of each node. However, this work does not explicitly account for the inference setting, where neither the reasoner nor the refiner has access to the correctness of the sampled response. This can lead to refiners inadvertently degrading originally correct solutions. To address this issue, Xiong et al. (2025) introduce a learnable self-rewarding mechanism. This approach mitigates the risk of worsening correct solutions and alleviates the distribution-shifting problem in self-correction (Kumar et al., 2024). + +# 5.3 Learning to Reason with Multi-agent System + +In Section 2.3, we discussed how multi-agent systems extend single-agent systems through agent-agent communication. This enables agents to assume distinct roles, exchange messages, and coordinate their actions before interacting with the environment. In this section, we explore how trajectory collection can be achieved through the careful design of agent-agent communication and the coordination of actions across different agents. At the system level, communication serves as the input or perception mechanism for participating agents, focusing on the protocols governing message exchange. Meanwhile, actions represent the output of the system, addressing how consensus is reached given the diverse actions proposed by individual agents. + +# 5.3.1 Designing Agent-Agent Communication + +In a multi-agent framework, ensuring that each agent is aware of the actions of others is critical, as a well-designed communication system can significantly enhance collective intelligence (Guo et al., 2024b). One effective solution is the use of a centralized controller (Canese et al., 2021). 
For example, Matta et al. (2019) propose a centralized aggregation center that constructs a global swarm matrix by aggregating the Q-value tables of all agents. Similarly, the MARCO framework (Zhang et al., 2021) employs centralized training with + +decentralized execution to improve sample efficiency in partially observable multi-agent environments. By learning a shared model that generalizes across agents' policies and directing exploration toward uncertain areas, MARCO optimizes reasoning and resource utilization in cooperative tasks. + +To enable effective communication among agents, Sukhbaatar et al. (2016) introduce a neural communication model with a learned protocol tailored to the task. Additionally, a shared message pool (Hong et al., 2023) can be implemented, where agents send messages and subscribe to relevant ones based on their individual profiles. In recent work by Qiu et al. (2024), each agent maintains a private intention, which includes its current goal and associated sub-tasks. These intentions are broadcast periodically, and a propagation network converts them into teammate-specific communication messages, ensuring that relevant goals are shared with the appropriate teammates. + +# 5.3.2 Coordinating Actions among Multiple Agents + +To enhance coordination among multiple agents, various approaches have been proposed, including leveraging expert knowledge, graph-based frameworks, and hierarchical structures to improve efficiency and effectiveness. For better coordination of actions across agents, Lau et al. (2012) utilize expert coordination knowledge as constraints to refine the exploration and learning process. By reducing the action space and focusing on promising states, this approach enhances decision-making. Additionally, graph-based methods have been explored to improve coordination. 
For instance, the Graph-based Coordination Strategy (GCS) (Ruan et al., 2022) introduces a framework that employs a directed acyclic graph to coordinate agent policies. This enables agents to synchronize their actions through predefined temporal sequences. Similarly, Deep Implicit Coordination Graphs (DICG) (Li et al., 2020) propose a graph neural network-based module to dynamically infer coordination structures for multi-agent reinforcement learning (MARL). + +Furthermore, hierarchical approaches have been developed to enhance synchronization. The Hierarchical Cooperative Multi-Agent Learning (HAVEN) framework (Xu et al., 2023) divides policies into two levels—strategy and execution—improving both inter-agent and inter-level coordination. + +# 5.3.3 Multi-Agent Training from Trajectories + +Compared to single-agent scenarios, multi-agent training introduces additional challenges in higher coordination and communication complexity and recent approaches have leveraged different ways to address the challenge. DEBATUNE (Li et al., 2024c) employs a multi-round debate mechanism between two agents with opposing stances to generate training data. Through iterative debate, arguments are refined, resulting in high-quality and diverse outputs. During the training phase, models are fine-tuned using these debate-generated trajectories, enabling controllability and alignment with user-defined stances. Similarly, Subramaniam et al. (2025) fine-tune a society of agents, starting from the same base model, on independent data generated through multi-agent interactions. These agents specialize in distinct roles, such as "generation" and "critic" producing diverse reasoning trajectories. Training on such varied trajectories fosters specialization and mitigates performance plateaus. Acc-Debate (Estornell et al., 2024) utilizes an Actor-Critic framework to train a team of two agents collaboratively. 
One agent serves as the "Actor" generating responses, while the other acts as the "Critic" refining those responses. Training alternates between optimizing the Actor and Critic models, leveraging partial trajectory rewards—which capture the expectation of reaching the correct answer at intermediate time steps—to address temporal dependencies in the debate process. This approach enhances collaboration and improves final performance. + +Furthermore, Li et al. (2024f) address the challenge of mixed-motive cooperation in multi-agent systems by modifying gradients to guide agents toward stable fixed points that balance individual and collective interests. This method enhances the ability to optimize trajectories for effective collaboration. + +# 5.4 Toward Cost-aware and Inference-aware Training + +As reasoning models grow increasingly complex, ensuring both efficiency and effectiveness becomes crucial. Inference-time scaling and learning-to-reason approaches play complementary roles, as most inference-time scaling methods can be applied to models specifically trained for reasoning. However, both approaches come + +with associated costs, whether it involves generating thousands of additional tokens compared to greedy decoding during inference or training models on large-scale trajectory datasets. Consequently, cost-aware methodologies, which factor in computational costs when deciding how to allocate resources during both training and inference, or those that address sample inefficiency, have gained recent attention. Similarly, inference-aware methodologies aim to enhance the time and cost efficiency of inference scaling by explicitly incorporating inference-time scaling strategies during training. In this section, we explore emerging cost-aware and inference-aware approaches. 
+ +# 5.4.1 Cost-aware Training + +Learning to reduce inference cost This line of research explores strategies to optimize the tradeoff between computational cost and reasoning performance by dynamically allocating resources based on input (prompt) complexity and desired output quality. For prompt analysis, Damani et al. (2025) use a learnable model to predict the difficulty of batched queries and dynamically allocate inference budgets accordingly. Building on this, Zhang et al. (2024d) train a model to predict the most efficient combination of inference strategies, directly optimizing for pass rates. Yue et al. (2025) decompose reasoning trajectories into specific behaviors and employ a trainable planner to derive question-specific compositions, identifying the optimal reasoning strategy—such as whether question decomposition or rewriting is necessary, whether Python programs are required, or if answer verification is needed. On the output side, Snell et al. (2025) propose a look-ahead search method, similar to step-level beam search, which switches between branches based on estimated returns to minimize search costs. + +Data-efficient training Another research direction focuses on reducing training costs by using a small set of high-quality samples (questions paired with trajectories or labels). Muennighoff et al. (2025) curate a dataset of 1,000 samples, emphasizing difficulty, diversity, and quality. Their work demonstrates that finetuning Qwen2.5-32B-Instruct on this dataset achieves performance surpassing o1-preview on competition math benchmarks. Ye et al. (2025) fine-tune Qwen2.5-32B-Instruct on 817 carefully curated training samples, achieving superior performance across a broader set of math reasoning benchmarks. Notably, Ye et al. (2025) highlight that these performance gains depend on using strong pre-trained models like Qwen2.5-32B-Instruct and do not occur with weaker models (e.g., Qwen1.5-32B-Instruct). 
+ +# 5.4.2 Inference-aware Training + +Existing work on inference scaling typically treats inference-time computation as a post-hoc design choice after conventional training. Inference-aware training approach challenges the assumption that decoupling training and inference-time computation is optimal. For instance, if an LLM is allowed multiple attempts to solve a math problem, fine-tuning it to explore diverse problem-solving strategies might yield better results than simply generating candidates representing its best single attempt. + +The core idea is that explicitly considering the inference procedure during training can significantly enhance the effectiveness of inference-time computation. For example, Best-of-N (BoN) is a basic inference-time strategy that selects the highest-reward response from $N$ candidates. However, this approach is misaligned with fine-tuning objectives. To address this, Sessa et al. (2024) propose an RL objective that distills the Best-of-N distribution into the policy model using Jeffreys divergence (Jeffreys, 1946). Similarly, Balashankar et al. (2024) develop a calibrated reward that incorporates the inference procedure (Best-of-N) during alignment. In a related effort, Chow et al. (2024) aim to optimize BoN directly, overcoming the non-differentiable argmax operator by employing a reinforcement learning framework. + +# 6 Discussion: Trends and Open Challenges + +The field of reasoning LLMs has seen rapid advancements, with notable trends emerging in training-vs-inference regimes and architectural dimensions as we discuss in Section 6.1. Despite this progress, several challenges remain, hindering their generalizability and practical applicability. This section outlines these observed trends and highlights open challenges, along with potential directions to address them (Section 6.2). 
+ +# 6.1 Observed Trends + +Following the two dimensions outlined in Figure 2, we identify two key trends in LLM reasoning: one progresses from inference scaling to learning to reason (Section 6.1.1), while the other shifts from standalone LLMs to agentic systems (Section 6.1.2). Additionally, reasoning is ubiquitous yet challenging when developing a general-purpose reasoner. Notably, many state-of-the-art reasoning language models are predominantly focused on a few domains, particularly mathematics and coding (OpenAI et al., 2024; DeepSeek-AI et al., 2025). Whether it is possible to build a truly generalizable reasoning system remains an open question (Kang et al., 2024; Qi et al., 2024; Huang et al., 2024c; Sun et al., 2024c). However, we observe a growing trend toward developing domain-specific reasoning models (Section 6.1.3). + +# 6.1.1 From Inference Scaling to Learning to Reason + +Since the introduction of CoT and self-consistency (Wang et al., 2023f), inference scaling techniques have emerged as a key paradigm for enhancing reasoning performance without incurring the costs associated with reasoning-specific training. Inference scaling complements learning-to-reason approaches, with recent studies demonstrating that combining self-consistency with reasoning-specific training yields further improvements (DeepSeek-AI et al., 2025; Muennighoff et al., 2025). Additionally, since the release of OpenAI's o1 (Huang et al., 2024d), some methods have sought to activate human-like reasoning patterns by introducing self-correction (Kumar et al., 2024), self-critique (Xi et al., 2024), or even MCTS (Qin et al., 2024). + +Researchers initially found that data-driven approaches, such as supervised fine-tuning (SFT) and knowledge distillation, were highly effective in enhancing LLMs' reasoning capabilities. However, these methods rely on the availability of a strong teacher model for distillation. 
An alternative approach uses outcome labels for iterative rejection sampling (Yuan et al., 2023), which converges quickly after a few iterations (Dong et al., 2023). These limitations have spurred the development of more data-efficient methods, such as automatic process supervision (Jiao et al., 2024a; Wang et al., 2024g;k; Luo et al., 2024b) and iterative refinement (Guan et al., 2025), which optimize training trajectories using fixed outcome labels. The release of Deepseek-R1 (DeepSeek-AI et al., 2025) further advanced the field, demonstrating the ability to generate human-like, long reasoning chains through pure reinforcement learning under outcome supervision alone. + +# 6.1.2 From Standalone LLMs to Agentic Systems + +In Sections 2.3 and 5, we discussed how the rise of agentic systems has significantly influenced reasoning research. A clear trend has emerged, shifting from standalone LLM reasoning to agentic reasoning. This shift aligns with our expectations: reasoning is no longer confined to a single LLM but is expected to interact with the external world and other agents, as well as exhibit autonomy, such as planning capabilities. + +On one hand, there is ongoing debate about whether agentic reasoning is always beneficial, especially for straightforward and simple tasks (Sprague et al., 2024b; Liu et al., 2024c). On the other hand, current systems' autonomy is largely limited to planning, whereas it could encompass much more. For instance, system-level or meta-level planning is essential in agentic systems, requiring the design of effective ways to connect different agents (Zhou et al., 2025a; Zhuge et al., 2024; Zhang et al., 2024c; Hu et al., 2025). A notable recent study (Ke et al., 2025c) demonstrates that such design can be with zero supervision and through self-improvement alone. 
Another critical aspect of autonomous agents is proactivity, yet current reasoning agents still lack the ability to proactively seek clarification or request additional information from users or the environment. + +# 6.1.3 Domain-Specific Reasoners + +Mathematical reasoning Mathematics serves as an ideal testbed for studying LLM reasoning capabilities due to its structured nature and clear evaluation criteria. Mathematical reasoning has evolved along two complementary paths. The first, often referred to as the "informal approach" (Yang et al., 2024d), treats mathematical problems as natural language tasks and fine-tunes LLMs on carefully curated or filtered problem-solving datasets. Systems like NuminaMath (Fleureau et al., 2024), DeepSeekMath (Shao et al., 2024), Llemma (Azerbayev et al., 2024), and MetaMath (Yu et al., 2024b) have demonstrated remarkable + +capabilities by combining mathematical text training (pre-training, supervised fine-tuning, and reinforcement learning), tree-based search, tool-integrated reasoning, and various inference scaling techniques discussed in earlier sections. This approach has achieved significant success across benchmarks ranging from GSM8K (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021b) to competition-level problems such as AIMO (Markets, 2024) and AIME-level problems (aim, 2025). However, challenges persist in tackling college-level and advanced mathematics, where high-quality training data is scarce, and verifying complex multi-step reasoning becomes increasingly difficult. Spatial reasoning (e.g., counting, navigation, and inferring spatial relationships) presents another challenge for LLMs and multi-modal LLMs (Wang et al., 2024b). + +Complementing the informal approach, formal mathematical reasoning grounds systems in precise symbolic frameworks, such as proof assistants like Isabelle (Nipkow et al., 2002), Lean (De Moura et al., 2015), and Coq (Barras et al., 1997; The Coq Development Team, 2024). 
Recent advances in this direction include neural theorem-proving systems that combine tactic generation with proof search (Yang et al., 2023b; Thakur et al., 2024), as well as autoformalization techniques that translate between natural and formal mathematics (Wu et al., 2022; Jiang et al., 2024a). The formal approach offers several advantages: automatic verification of reasoning steps, generation of training signals from the verification environment, and the potential to bootstrap capabilities through learned abstractions. For example, AlphaProof (AlphaProof & teams, 2024) and AlphaGeometry (Trinh et al., 2024) demonstrate the power of integrating neural networks with symbolic verification, achieving groundbreaking performance on Olympic-level mathematics problems. A recent position paper by Yang et al. (2024d) argues that formal mathematical reasoning represents a critical frontier for advancing AI's ability to tackle increasingly abstract and complex mathematical problems. + +Code generation Code serves as a more formal language for reasoning. Given the complexity of generating entire programs, earlier studies primarily focused on function-level code completion, as demonstrated by benchmarks such as HumanEval (Chen et al., 2021) and MBPP (Austin et al., 2021). With stronger foundation models trained on extensive code corpora (Zhu et al., 2024a; Hui et al., 2024), the focus of evaluation has shifted toward general competition programming (Hendrycks et al., 2021a; Jain et al., 2024). The earliest significant attempt to solve competition-level coding problems through large-scale training was AlphaCode (Li et al., 2022). Similar to the general domain, the training paradigm has evolved from instruction tuning (Wei et al., 2024) to RL and preference learning based on test cases and compiler feedback (Dou et al., 2024a; Weyssow et al., 2024; Jiao et al., 2025; Huang et al., 2024b). 
The recent releases of DeepSeek-R1 (DeepSeek-AI et al., 2025) and OpenAI's o3 (OpenAI et al., 2025) have further advanced the field by enabling end-to-end RL through outcome supervision. OpenAI et al. (2025) also highlight that purely data-driven approaches can outperform models incorporating human-experience-based competition strategies. + +Another important application of code generation is in software engineering, where advancements in LLMs are making fully automated pipelines increasingly feasible. SWE-Bench (Jimenez et al., 2024), a benchmark based on GitHub issues, challenges LLMs with real-world software engineering problems. These tasks require coupled abilities, such as long-context modeling to process repository-level inputs, logical reasoning to locate bugs and design unit tests, and programming to implement solutions. Wei et al. (2025) pioneer the use of end-to-end RL for optimizing automatic debugging. Specifically, they select pull requests (PRs) from GitHub linked to issues and use the consistency between the predicted code snippet and the repository's code after the PR is merged as the reward signal. + +Tabular reasoning Reasoning over tabular (or structured) data, which involves generating responses based on user queries and provided tables, plays a vital role in improving data analysis efficiency (Lu et al., 2025). A critical aspect of tabular reasoning with LLMs involves transforming structured data into a format that these models can process effectively. Techniques such as serialization (Chen, 2023; Cheng et al., 2023; Chen et al., 2023e), prompt engineering (Ye et al., 2023b; Lin et al., 2023b; Wang et al., 2024n; Zhang et al., 2024j), and embedding methods (Herzig et al., 2020) have been widely studied to facilitate this adaptation, converting tabular data into human-readable text or leveraging specialized table representations. Additionally, specialized prompting of LLMs with transformed tabular data is crucial. 
For instance, Pourreza & Rafiei (2023); Ye et al. (2023c) find that LLMs perform better on decomposed sub-tasks than on the entire table reasoning task. However, LLMs may still struggle with certain sub-tasks. To address this, Cao et al. (2023) employ diverse tools for specific sub-tasks, while Lin et al. (2023b;a) focus on retrieving relevant + +tables. Notably, Jiang et al. (2023) propose a unified approach to enhance LLM reasoning over structured data by designing specialized interfaces. These interfaces extract relevant evidence from structured data, enabling LLMs to focus on reasoning based on the gathered information. + +Despite the promising results of various adaptation methods, significant challenges remain. First, tabular data often comprises diverse feature types—categorical, numerical, and textual—adding complexity to modeling (Borisov et al., 2023; Gruver et al., 2023). Second, the effectiveness (Sui et al., 2024) and robustness (Liu et al., 2024d) of LLMs in tabular tasks heavily depend on proper prompt design and data preprocessing. Poor or out-of-distribution preprocessing can lead to information loss, misinterpretation, multicollinearity, and interpretability issues, significantly degrading performance (Sui et al., 2024). Finally, LLMs are prone to hallucinations (Ye et al., 2023d) and fairness concerns (Liu et al., 2023), limiting their reliability. For a comprehensive overview, see recent surveys on LLMs for table reasoning (Fang et al., 2024b; Dong & Wang, 2024; Zhang et al., 2025a; Lu et al., 2025). + +Reasoning in multi-agent games In game-theoretic scenarios involving both collaboration and competition, strategic social reasoning skills are essential (Lee et al., 2024). Strategic reasoning refers to the cognitive process of making decisions in complex social situations. As highlighted by Feng et al. 
(2024b), the complexity and challenges of this reasoning stem from the involvement of multiple parties and the dynamic nature of the environment. + +To capture the cognitive states of multiple parties, the concept of Theory-of-Mind (ToM) (Zhang et al., 2012) has been integrated into modeling processes. ToM attributes mental states—such as beliefs, intentions, desires, emotions, and knowledge—to oneself and others. Recent studies (Kosinski, 2024) have shown that LLMs exhibit ToM capabilities, and researchers have leveraged these capabilities to enhance strategic reasoning in social scenarios. For instance, Guo et al. (2023) computationally model the beliefs, intents, and potential behaviors of teammates and opponents to improve understanding and reasoning in games. Similarly, TOMABD (Montes et al., 2023) incorporates ToM into agents to enhance their reasoning and decision-making abilities. To address the complexity of dynamic social interactions (Li et al., 2024d), prior research employs RL methods to explore potential behaviors and evaluate different states (Seo & Lee, 2017; Wen et al., 2019). Additionally, some studies introduce modular frameworks to improve strategic reasoning in complex scenarios. For example, ReTA (Duan et al., 2024) uses LLM-based modules as the main actor, reward actor, and anticipation actor, inspired by minimax game theory. Recent work (Trecsenyi et al., 2025) has also begun exploring role-based multi-agent interactions to enable more sophisticated strategic reasoning. These approaches collectively enhance LLMs' strategic reasoning capabilities in dynamic environments. + +Reward modeling and evaluation as a reasoning task Evaluation, whether as an end goal or a component of a larger reasoning system, remains a significant challenge. While using PRMs to enhance reasoning abilities is popular during both inference and training, training these models requires extensive step-by-step annotations (Lightman et al., 2024). 
To address this, recent approaches have introduced automated feedback mechanisms, such as tree search (Wang et al., 2024g; Chen et al., 2024a; Setlur et al., 2024a; Luo et al., 2024c; Wang et al., 2024l) or, less frequently, LLM-as-judge (Zhang et al., 2025b). Although these methods avoid human preference annotations, they often rely on trajectories sampled from a fixed policy model, which may not align well with the problem distribution. This misalignment leads to poor generalization, as highlighted by Zheng et al. (2024). Consequently, the next frontier in reward modeling will need to combine automated data collection with diverse data sources to achieve annotation-efficient generalization. + +While reasoning in LLM-as-judges is not explicitly addressed, recent training and inference techniques have drawn from established methods for improving reasoning. Judge-based assessment inherently involves a finite set of outcomes (e.g., A or B for pairwise judgments or 1-5 for single ratings), making it suitable for self-consistency decoding (Kim et al., 2024b). More advanced inference-time approaches, such as multi-judge or multi-round discussions (Li et al., 2023d; Chan et al., 2023; Verga et al., 2024; Yu et al., 2024d), self-rationalization (Trivedi et al., 2024), or sequential escalation (Jung et al., 2024), have been proposed. Concurrently, training-time solutions for LLM-as-judges focus on distilling chain-of-thought judgments from larger teacher models and fine-tuning smaller judges via supervised fine-tuning (Wang et al., 2023g; Li et al., 2023b; Kim et al., 2023; 2024c; Vu et al., 2024) or preference optimization (Hu et al., 2024; Wang et al., + +2024f; Ye et al., 2024; Saad-Falcon et al., 2024; Deshpande et al., 2024; Wang et al., 2024j). 
Despite these advancements, such models still struggle in reasoning-intensive domains (Tan et al., 2024; Zhou et al., 2025b; Xu et al., 2025b), whereas stronger reasoning models have outperformed specialized judge models in more difficult evaluation settings (Xu et al., 2025a). In all, recent benchmarking results highlight that developing reasoning-specific judges remains an open and challenging research area. + +# 6.2 Open Challenges + +Despite the trends observed in Section 6.1, several challenges remain. First, how can we effectively evaluate both the reasoning outcome and the reasoning chain? (Section 6.2.1). Second, do we truly understand reasoning? Does the reasoning chain generated by next-token sampling faithfully reflect the internal reasoning process of an LLM, or is it merely imitating its training data? (Section 6.2.2). Third, training of LLM reasoning system is still largely hindered by substantial data requirements, which include both more challenging questions and the corresponding outcome labels. This not only affects the end-to-end reasoner training, but also limits our exploration in building stronger reward models to facilitate inference time scaling (Section 6.2.3). + +# 6.2.1 Evaluating Reasoning + +As language models and agentic systems tackle increasingly complex tasks, evaluating their performance becomes equally challenging. Currently, progress in LLM reasoning is measured by outcome performance on fixed benchmarks (e.g., MATH (Hendrycks et al., 2021b)). However, relying solely on outcomes to verify reasoning correctness may be insufficient, as a correct final answer does not guarantee a logically sound reasoning chain (Hao et al., 2024a). Prior work has shown that LLMs often produce unfaithful reasoning chains, even when the final answers are correct (Wiegreffe et al., 2022; Lyu et al., 2023; Wang et al., 2023b). + +Evaluating reasoning beyond outcomes remains an open and challenging problem. 
Early approaches relied on human annotators to assess the quality of generated explanations (Camburu et al., 2018; Rajani et al., 2019), focusing on whether the reasoning could lead to the same predictions. To scale this idea, follow-up works (Wiegreffe et al., 2020; Hase et al., 2020) used trained models as simulators to evaluate the alignment between generated reasoning and final predictions. When human-annotated reasoning chains are available, some studies leverage traditional NLG metrics to measure overlap between human- and model-generated explanations (Clinciu et al., 2021). Others propose reasoning-specific metrics to assess aspects like coherency, redundancy, factuality (Golovneva et al., 2022), informativeness (Chen et al., 2022), robustness (Wang & Zhao, 2024), and contextual faithfulness (Ming et al., 2025). Under the LLM-as-Judge paradigm, recent works prompt powerful LLMs like GPT-4 to directly evaluate reasoning chains generated by other models (Hao et al., 2024a; Sun et al., 2024b). However, as reasoning tasks grow in complexity, evaluation becomes increasingly difficult, even for frontier models—if a model cannot perform a task, how can it judge if the task is done correctly? Thus, developing robust and accurate methods to evaluate reasoning beyond outcomes remains a significant and unresolved challenge. + +# 6.2.2 Understanding Reasoning + +Recent research on understanding LLM reasoning has advanced along two complementary paths: empirical studies that evaluate and analyze performance through carefully designed and controlled experiments, and formal analyses that introduce new frameworks to systematically explore the underlying mechanisms of how LLMs reason. + +Empirical analysis of reasoning Recent LLMs exhibit strong performance across diverse tasks, suggesting some level of reasoning capability. 
However, whether these skills are general and transferable or merely specialized for tasks encountered during pretraining remains an open and debated question. To address this, several empirical studies have sought to understand and enhance LLM capabilities across various reasoning forms: abstractive reasoning (Wu et al., 2024a; He & Lu, 2024), compositional reasoning (Bhargava & Ng, 2022; Li et al., 2024g), inductive reasoning (Yang et al., 2024f; Han et al., 2024b), abductive reasoning (Jung et al., 2022; Pareschi, 2023), deductive reasoning (Poesia et al., 2024; Seals & Shalin, 2024; Feng et al., + +2024a), logical reasoning (Wan et al., 2024b; Han et al., 2024a; Xu et al., 2025c), commonsense reasoning (Lin et al., 2021; Liang et al., 2023a; Sun et al., 2024a), math reasoning (Ahn et al., 2024; Mirzadeh et al., 2025), and social reasoning (Gandhi et al., 2023). Notably, Arkoudas (2023) qualitatively evaluate GPT-4 on 21 diverse reasoning problems, concluding that despite occasional analytical success, GPT-4 remains incapable of true reasoning. Similarly, Wu et al. (2024a) empirically investigate abstractive reasoning and find that while LLMs achieve nontrivial performance on counterfactual tasks, their performance consistently degrades compared to default conditions, indicating reliance on narrow, non-transferable procedures. Mondorf & Plank (2024) provide a comprehensive survey on recent evaluations of LLM reasoning abilities. + +Beyond assessing LLM reasoning capabilities, there is growing interest in evaluating how test-time scaling methods enhance reasoning. The empirical success of CoT prompting has spurred extensive research into its mechanisms. Wang et al. (2023a) and Madaan et al. (2023a) investigate the role of demonstrations, finding that LLMs prioritize pattern consistency over accuracy and exhibit robustness to invalid demonstrations—particularly in mathematical reasoning, where incorrect equations often do not hinder performance. 
They also emphasize the importance of relevant rationales and logical progression in CoT prompts. Additionally, Madaan et al. (2023a) conclude that CoT aids models by supplementing missing information, such as commonsense knowledge, and reinforcing task understanding. From a modeling perspective, Dutta et al. (2024) analyze CoT through neural mechanisms, revealing that LLMs process input context and generated CoT via parallel pathways. They find that early layers (e.g., layers 1-16 in Llama-2 7B (Touvron et al., 2023)) rely on pretraining knowledge, while later layers specialize in in-context learning, with answer-writing heads emerging in the final layers. From a task perspective, Sprague et al. (2024a) conduct a meta-analysis of 100 CoT papers, showing that CoT significantly improves performance on mathematical, logical, and algorithmic reasoning tasks but offers minimal gains for non-symbolic tasks. Their analysis suggests that CoT excels in computational steps but struggles with tool-augmented reasoning. On the training front, Gao et al. (2024a); Zhang et al. (2025b); Yeo et al. (2025) explore key supervised fine-tuning (SFT) and reinforcement learning (RL) factors that optimize LLM training strategies for enhancing CoT reasoning. + +Formal analysis of reasoning There is increasing interest in formal analyses, which use structured and logical proofs to systematically evaluate and improve the reasoning capabilities of LLMs. Han et al. (2022) introduce FOLIO, a dataset designed to assess models' ability to derive correct conclusions from premises using first-order logic reasoning. Similarly, Saparov & He (2023) develop a benchmark evaluating LLMs on symbolic ontologies, revealing that models often struggle with proof planning and rely on knowledge retrieval rather than genuine reasoning. These findings highlight the potential of neurosymbolic methods to better understand LLM reasoning. Recent work also explores formal analysis techniques to enhance LLM reasoning. 
For instance, Pan et al. (2023) use LLMs to translate natural language problems into symbolic formulations, which are then processed by deterministic symbolic solvers for inference. (Li et al., 2025b) demonstrate the promise of leveraging LLMs' symbolic reasoning for mathematical problem-solving. Other studies focus on domain-specific reasoning: Fang et al. (2024a) propose an LLM-based agent for text-based games, designed to tackle symbolic challenges and achieve in-game objectives, while Nahid & Rafiei (2024) introduce a framework to enhance LLMs' symbolic reasoning by normalizing web tables. These studies reveal LLMs' limitations in structured reasoning while emphasizing the value of integrating formal analysis to strengthen their capabilities. + +Theoretical analysis of ICL and CoT reasoning The success of in-context learning (ICL) and CoT prompting in enhancing LLM reasoning has sparked significant interest in understanding their underlying mechanisms from theoretical perspectives. Extensive prior studies on ICL suggest that transformer-based in-context learners effectively implement various learning algorithms, encoding implicit, context-dependent models for generation within their hidden activations—models that can be trained through demonstrations as these activations are computed. For instance, Akyurek et al. (2022) investigate this hypothesis in the context of linear regression models, while Von Oswald et al. (2023) and Dai et al. (2023) explore how transformer-based in-context learners function as meta-optimizers, effectively learning models via gradient descent during their forward pass. From a Bayesian inference perspective, Xie et al. (2022); Zhang et al. (2023) and Wang et al. 
(2023e) demonstrate that transformer-based in-context learners can achieve the Bayes-optimal predictor when demonstrations are selected based on a shared latent concept variable, such + +as format or task information, even in the presence of distribution mismatches between demonstrations and training data. Additionally, Elhage et al. (2021); Olsson et al. (2022) examine ICL through the concept of "induction heads" - attention heads that implement a simple algorithm to complete tasks, providing evidence that induction heads may underlie much of the in-context learning observed in transformer-based models. + +The body of work exploring the theoretical insights into CoT mechanisms remains relatively limited, with most studies focusing on the expressiveness of LLMs when using CoT. A pioneering study by Feng et al. (2023a) investigates LLMs with CoT for solving mathematical and decision-making problems. Using circuit complexity theory (Arora & Barak, 2009), they demonstrate that bounded-depth transformers cannot solve basic arithmetic or equation tasks unless the model size grows super-polynomially. In contrast, they prove that constant-size models can solve these tasks, along with a wide range of decision-making problems such as Dynamic Programming, by generating CoT derivations in a common mathematical language. Li et al. (2024h) extend these findings, providing a tighter upper bound on the expressiveness of constant-depth transformers with CoT. However, these studies do not explore how the length of a CoT affects model reasoning power. To address this gap, Merrill & Sabharwal (2024) find that a logarithmic number of intermediate steps (relative to input length) offers only marginal gains over standard transformers, while a linear number of steps under the assumption of projected pre-norm (a slight generalization of standard pre-norm) enables the recognition of all regular languages. 
Furthermore, polynomially many steps, combined with generalized pre-norm, allow transformers to recognize exactly the class of polynomial-time solvable problems. + +# 6.2.3 Data Challenges in Advancing Reasoning Capabilities + +Challenges in scaling question and outcome supervision for RL As discussed earlier, development trends in both general and task-specific domains are converging, with a focus on employing end-to-end RL to minimize inductive bias and push the boundaries of intelligence. Frontier models now incorporate competition-level problems annually for training, as these represent the most challenging tasks and are annotated with high-quality answers by human experts. However, we are nearing the limits of available human-annotated data, raising the question of whether methods beyond human labeling can enable the continuous scaling of RL. This challenge is particularly relevant in domains where prompts are not easily verifiable, such as open-ended generation, software engineering, and most agentic tasks. + +Challenges in reward modeling Early studies have investigated the feasibility of process supervision (Lightman et al., 2024) and its effectiveness in inference-time scaling (Snell et al., 2025). However, its high annotation costs and ambiguous definition—particularly in long CoT scenarios where self-reflection is encouraged—have limited its adoption in large-scale reinforcement learning. Despite these challenges, the key advantage of accurate process supervision is its ability to reduce hallucinations, making it essential for automated reasoning and knowledge discovery. Additionally, as discussed in Section 4.2, the training paradigm for reward models is closely tied to that of reasoning models. This raises concerns about whether allocating the same annotation budget directly to reasoning models could lead to more stable and general improvements, potentially limiting the gains achievable through inference-time scaling. 
+ +# 7 Conclusion + +In this work, we provide a timely and comprehensive survey on LLM reasoning. We first formalize the goal of LLM reasoning and consolidate past research by categorizing reasoning techniques along two dimensions: regimes and architectures. Within each of these dimensions, we review both input and output perspectives in detail. Our review highlights emerging trends, including the shift from inference-time scaling to learning-to-reason regimes, and the transition from standalone models to agentic systems. We also review and compare a wide range of learning algorithms, including supervised fine-tuning and reinforcement learning, as well as the training of reasoners and training of verifiers. Despite these advancements, challenges remain in evaluating reasoning and understanding real reasoning mechanisms as well as addressing data challenges in advancing reasoning capabilities. We encourage future research to further explore these trends, such as inference-aware learning-to-reason and automated multi-agent design, to enhance LLM reasoning. + +# Acknowledgments + +We thank M Saiful Bari, Semih Yavuz and Yingbo Zhou for helpful discussions. + +# References + +American invitational mathematics examination. Mathematical Association of America, 2025. https://maa.org/maa-invitational-competitions/. +Rishabh Agarwal, Avi Singh, Lei Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, et al. Many-shot in-context learning. Advances in Neural Information Processing Systems, 37:76930-76966, 2024. +Sweta Agrawal, Chunting Zhou, Mike Lewis, Luke Zettlemoyer, and Marjan Ghazvininejad. In-context examples selection for machine translation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 8857-8873, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.564. 
URL https://aclanthology.org/2023.findings-acl.564/. 
+Arash Ahmadian, Chris Cremer, Matthias Gallé, Marzieh Fadaee, Julia Kreutzer, Olivier Pietquin, Ahmet Üstün, and Sara Hooker. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. arXiv preprint arXiv:2402.14740, 2024. 
+Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/. 
+Afra Feyza Akyürek, Ekin Akyürek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. Rl4f: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv preprint arXiv:2305.08844, 2023. 
+Ekin Akyürek, Dale Schuurmans, Jacob Andreas, Tengyu Ma, and Denny Zhou. What learning algorithm is in-context learning? investigations with linear models. In The Eleventh International Conference on Learning Representations, 2022. 
+AlphaProof and AlphaGeometry teams. AI achieves silver-medal standard solving international mathematical olympiad problems. https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/, 2024. 
+Konstantine Arkoudas. Gpt-4 can't reason. arXiv preprint arXiv:2308.03762, 2023. 
+Sanjeev Arora and Boaz Barak. Computational complexity: a modern approach. Cambridge University Press, 2009. 
+Krishna Aswani, Huilin Lu, Pranav Patankar, Priya Dhalwani, Xue Tan, Jayant Ganeshmohan, and Simon Lacasse. Auto-evolve: Enhancing large language model's performance via self-reasoning framework. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 13243-13257, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.774. URL https://aclanthology.org/2024.findings-emnlp.774/. 
+Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021. 
+Mohammad Gheshlaghi Azar, Zhaohan Daniel Guo, Bilal Piot, Remi Munos, Mark Rowland, Michal Valko, and Daniele Calandriello. A general theoretical paradigm to understand learning from human preferences. In International Conference on Artificial Intelligence and Statistics, pp. 4447-4455. PMLR, 2024. 

Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q Jiang, Jia Deng, Stella Biderman, and Sean Welleck. LLemma: An open language model for mathematics. In International Conference on Learning Representations (ICLR), 2024. 
+Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 
+Ananth Balashankar, Ziteng Sun, Jonathan Berant, Jacob Eisenstein, Michael Collins, Adrian Hutter, Jong Lee, Chirag Nagpal, Flavien Prost, Aradhana Sinha, Ananda Theertha Suresh, and Ahmad Beirami. Infalign: Inference-aware language model alignment. CoRR, abs/2412.19792, 2024. doi: 10.48550/ARXIV.2412.19792. URL https://doi.org/10.48550/arXiv.2412.19792. 
+Bruno Barras, Samuel Boutin, Cristina Cornes, Judicael Courant, Jean-Christophe Filliatre, Eduardo Gimenez, Hugo Herbelin, Gerard Huet, Cesar Munoz, Chetan Murthy, et al. The Coq proof assistant reference manual: Version 6.1. 
PhD thesis, Inria, 1997. 
+Richard Bellman. Dynamic programming and stochastic control processes. Information and Control, 1 (3):228-239, 1958. ISSN 0019-9958. doi: https://doi.org/10.1016/S0019-9958(58)80003-0. URL https://www.sciencedirect.com/science/article/pii/S0019995858800030. 
+Bespoke Labs. Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22. 
+Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nyczyk, et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024. 
+Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nyczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025. 
+Prajjwal Bhargava and Vincent Ng. Commonsense knowledge reasoning and generation with pre-trained language models: A survey. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 12317-12325, 2022. 
+Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024. URL https://arxiv.org/pdf/2412.09078. 
+Vadim Borisov, Kathrin Sessler, Tobias Leemann, Martin Pawelczyk, and Gjergji Kasneci. Language models are realistic tabular data generators. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=cEygmmQNOeI. 
+Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. 
arXiv preprint arXiv:2407.21787, 2024. +Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners, 2020. URL https://arxiv.org/abs/2005.14165. + +Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-snli: Natural language inference with natural language explanations. Advances in Neural Information Processing Systems, 31, 2018. +Lorenzo Canese, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, Marco Re, and Sergio Spanò. Multi-agent reinforcement learning: A review of challenges and applications. Applied Sciences, 11(11):4948, 2021. +Yihan Cao, Shuyi Chen, Ryan Liu, Zhiruo Wang, and Daniel Fried. API-assisted code generation for question answering on varied table structures. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 14536-14548, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.897. URL https://aclanthology.org/2023.emnlp-main.897/. +Stephen Casper, Xander Davies, Claudia Shi, Thomas Krendl Gilbert, Jérémy Scheurer, Javier Rando, Rachel Freedman, Tomasz Korbak, David Lindner, Pedro Freire, et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv preprint arXiv:2307.15217, 2023. +Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. Chateval: Towards better llm-based evaluators through multi-agent debate. 
arXiv preprint arXiv:2308.07201, 2023. +Edward Y Chang. Socrasynth: Multi-llm reasoning with conditional statistics. arXiv preprint arXiv:2402.06634, 2024. +Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. Fireact: Toward language agent fine-tuning. CoRR, abs/2310.05915, 2023a. doi: 10.48550/ARXIV.2310.05915. URL https://doi.org/10.48550/arXiv.2310.05915. +Bei Chen, Fengji Zhang, Anh Nguyen, Daoguang Zan, Zeqi Lin, Jian-Guang Lou, and Weizhu Chen. Codet: Code generation with generated tests. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=ktrw68Cmu9c. +Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: process supervision without process. arXiv preprint arXiv:2405.03553, 2024a. +Hanjie Chen, Faeze Brahman, Xiang Ren, Yangfeng Ji, Yejin Choi, and Swabha Swayamdipta. Information-theoretic evaluation of free-text rationales with conditional $\mathcal{V}$ -information. In Workshop on Trustworthy and Socially Responsible Machine Learning, NeurIPS 2022, 2022. +Justin Chih-Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse llms. arXiv preprint arXiv:2309.13007, 2023c. +Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning, 2024b. URL https://arxiv.org/abs/2409.12147. 
+Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021. + +Pei Chen, Boran Han, and Shuai Zhang. Comm: Collaborative multi-agent, multi-reasoning-path prompting for complex problem solving. arXiv preprint arXiv:2404.17729, 2024c. +Wei-Lin Chen, Cheng-Kuang Wu, Yun-Nung Chen, and Hsin-Hsi Chen. Self-ICL: Zero-shot in-context learning with self-generated demonstrations. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 15651–15662, Singapore, December 2023d. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.968. URL https://aclanthology.org/2023.emnlp-main.968/. +Wenhu Chen. Large language models are few(1)-shot table reasoners. In Andreas Vlachos and Isabelle Augenstein (eds.), Findings of the Association for Computational Linguistics: EACL 2023, pp. 1120-1130, Dubrovnik, Croatia, May 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-eacl.83. URL https://aclanthology.org/2023-findings-eacl.83/. 
+Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023e. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd. +Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024d. +Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. Agent-flan: Designing data and methods of effective agent tuning for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics, ACL 2024, Bangkok, Thailand and virtual meeting, August 11-16, 2024, pp. 9354-9366. Association for Computational Linguistics, 2024e. URL https://doi.org/10.18653/v1/2024-findings-acl.557. +Zihan Chen, Song Wang, Zhen Tan, Jundong Li, and Cong Shen. Maple: Many-shot adaptive pseudo-labeling for in-context learning, 2025. URL https://arxiv.org/abs/2505.16225. +Zhoujun Cheng, Tianbao Xie, Peng Shi, Chengzu Li, Rahul Nadkarni, Yushi Hu, Caiming Xiong, Dragomir Radev, Mari Ostendorf, Luke Zettlemoyer, Noah A. Smith, and Tao Yu. Binding language models in symbolic languages. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1H1PV42cbF. +Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. CoRR, abs/2412.15287, 2024. doi: 10.48550/ARXIV.2412.15287. URL https://doi.org/10.48550/arXiv.2412.15287. +Miruna Clinciu, Arash Eshghi, and Helen Hastie. 
A study of automatic metrics for the evaluation of natural language explanations. arXiv preprint arXiv:2103.08545, 2021. 
+Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 
+Jonathan Cook, Tim Rocktäschel, Jakob Foerster, Dennis Aumiller, and Alex Wang. Ticking all the boxes: Generated checklists improve llm evaluation and generation. arXiv preprint arXiv:2410.03608, 2024. 
+Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456. 

Chris Cummins, Volker Seeker, Dejan Grubisic, Mostafa Elhoushi, Youwei Liang, Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Kim Hazelwood, Gabriel Synnaeve, et al. Large language models for compiler optimization. arXiv preprint arXiv:2309.07062, 2023. URL https://arxiv.org/abs/2309.07062. 
+Damai Dai, Yutao Sun, Li Dong, Yaru Hao, Shuming Ma, Zhifang Sui, and Furu Wei. Why can GPT learn in context? language models secretly perform gradient descent as meta-optimizers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 4005–4019, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-acl.247. URL https://aclanthology.org/2023.findings-acl.247/. 
+Ning Dai, Zheng Wu, Renjie Zheng, Ziyun Wei, Wenlei Shi, Xing Jin, Guanlin Liu, Chen Dun, Liang Huang, and Lin Yan. Process supervision-guided policy optimization for code generation. arXiv preprint arXiv:2410.17621, 2024. 
+Mehul Damani, Idan Shenfeld, Andi Peng, Andreea Bobu, and Jacob Andreas. Learning how hard to think: Input-adaptive allocation of LM computation. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=6qUUgw9bAZ. +Debrup Das, Debopriyo Banerjee, Somak Aditya, and Ashish Kulkarni. Mathsensei: A tool-augmented large language model for mathematical reasoning. arXiv preprint arXiv:2402.17231, 2024. +Leonardo De Moura, Soonho Kong, Jeremy Avigad, Floris Van Doorn, and Jakob von Raumer. The lean theorem prover (system description). In _Automated Deduction-CADE-25: 25th International Conference on Automated Deduction_, Berlin, Germany, August 1-7, 2015, Proceedings 25, pp. 378-388. Springer, 2015. +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. 
Yuyang Zhou, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yaohui Li, Yi Zheng, Yuchen Zhu, Yunxian Ma, Ying Tang, Yukun Zha, Yuting Yan, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhenda Xie, Zhengyan Zhang, Zhenwen Hao, Zhicheng Ma, Zhigang Yan, Zhiyu Wu, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang, and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025.
ISBN 9798400704314
+Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, Zhiheng Xi, Yuhao Zhou, Tao Ji, Rui Zheng, Qi Zhang, Xuanjing Huang, and Tao Gui. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. CoRR, abs/2402.01391, 2024a. doi: 10.48550/ARXIV.2402.01391. URL https://doi.org/10.48550/arXiv.2402.01391. +Zi-Yi Dou, Cheng-Fu Yang, Xueqing Wu, Kai-Wei Chang, and Nanyun Peng. Re-ReST: Reflection-reinforced self-training for language agents. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 15394-15411, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.861. URL https://aclanthology.org/2024.emnlp-main.861/. +Dheeru Dua, Shivanshu Gupta, Sameer Singh, and Matt Gardner. Successive prompting for decomposing complex questions. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1251-1265, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.81. URL https://aclanthology.org/2022.emnlp-main.81. +Jinhao Duan, Shiqi Wang, James Diffenderfer, Lichao Sun, Tianlong Chen, Bhavya Kailkhura, and Kaidi Xu. Reta: Recursively thinking ahead to improve the strategic reasoning of large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2232-2246, 2024. + +Tom Duenas and Diana Ruiz. The path to superintelligence: A critical analysis of openai's five levels of ai progression. Research Gate, 2024. +Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. 
How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC. +Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 1(1):12, 2021. +Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate. arXiv preprint arXiv:2411.00053, 2024. +Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024. +Meng Fang, Shilong Deng, Yudi Zhang, Zijing Shi, Ling Chen, Mykola Pechenizkiy, and Jun Wang. Large language models are neurosymbolic reasoners. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17985-17993, Mar. 2024a. doi: 10.1609/aaai.v38i16.29754. URL https://ojs.aaai.org/index.php/AAAI/article/view/29754. +Xi Fang, Weijie Xu, Fiona Anting Tan, Ziqing Hu, Jiani Zhang, Yanjun Qi, Srinivasan H. Sengamedu, and Christos Faloutsos. Large language models (LLMs) on tabular data: Prediction, generation, and understanding - a survey. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=IZnrCGF9WI. +Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: a theoretical perspective. Advances in Neural Information Processing Systems, 36:70757-70798, 2023a. +Jiazhan Feng, Ruochen Xu, Junheng Hao, Hiteshi Sharma, Yelong Shen, Dongyan Zhao, and Weizhu Chen. Language models can be deductive solvers. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Findings of the Association for Computational Linguistics: NAACL 2024, pp. 4026-4042, Mexico City, Mexico, June 2024a. 
doi: 10.18653/v1/2024.findings-naacl.254. URL https://aclanthology.org/2024.findings-naacl.254/.
Understanding social reasoning in language models with language models. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=8bqjirgxQM. +Deep Ganguli, Liane Lovitt, Jackson Kernion, Amanda Askell, Yuntao Bai, Saurav Kadavath, Ben Mann, Ethan Perez, Nicholas Schiefer, Kamal Ndousse, et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv preprint arXiv:2209.07858, 2022. +Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024a. +Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024b. +Olga Golovneva, Moya Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Roscoe: A suite of metrics for scoring step-by-step reasoning. arXiv preprint arXiv:2212.07919, 2022. +Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. URL https://proceedings.neurips.cc/paper_files/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf. +Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Ep0TjVoap. +Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens. 
In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=ph04CRkPdC. +Nate Gruver, Marc Anton Finzi, Shikai Qiu, and Andrew Gordon Wilson. Large language models are zero-shot time series forecasters. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=md68e8iZK1. +Zhengyao Gu, Henry Peng Zou, Yankai Chen, Aiwei Liu, Weizhi Zhang, and Philip S Yu. Semi-supervised in-context learning: A baseline study. arXiv preprint arXiv:2503.03062, 2025. +Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv: 2501.04519, 2025. +Jiaxian Guo, Bo Yang, Paul Yoo, Bill Yuchen Lin, Yusuke Iwasawa, and Yutaka Matsuo. Suspicion-agent: Playing imperfect information games with theory of mind aware gpt-4. arXiv preprint arXiv:2309.17277, 2023. +Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=ZG3RaNIs08. +Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Large language model based multi-agents: A survey of progress and challenges. arXiv preprint arXiv:2402.01680, 2024b. + +Zakaria Hammane, Fatima-Ezzahraa Ben-Bouazza, and Abdelhadi Fennan. Selfrewarddrag: Enhancing medical reasoning with retrieval-augmented generation and self-evaluation in large language models. In 2024 International Conference on Intelligent Systems and Computer Vision (ISCV), pp. 1-8. IEEE, 2024. +Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, et al. 
Arman Cohan. P-FOLIO: Evaluating and improving logical reasoning with abundant human-written reasoning chains. In Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 16553-16565, Miami, Florida, USA, November 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.966. URL https://aclanthology.org/2024.findings-emnlp.966/.
Leakage-adjusted simulatability: Can models generate non-trivial explanations of their behavior in natural language? arXiv preprint arXiv:2010.04119, 2020. +Michael Hassid, Tal Remez, Jonas Gehring, Roy Schwartz, and Yossi Adi. The larger the better? improved llm code-generation via budget reallocation. arXiv preprint arXiv:2404.00725, 2024. +Alex Havrilla, Yuqing Du, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, Sainbayar Sukhbaatar, and Roberta Raileanu. Teaching large language models to reason with reinforcement learning. arXiv preprint arXiv:2403.04642, 2024. +Jiabang He, Lei Wang, Yi Hu, Ning Liu, Hui Liu, Xing Xu, and Heng Tao Shen. Icl-d3ie: In-context learning with diverse demonstrations updating for document information extraction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19485-19494, 2023. +Jinwei He and Feng Lu. Causejudger: Identifying the cause with llms for abductive logical reasoning. arXiv preprint arXiv:2409.05559, 2024. +Dan Hendrycks, Steven Basart, Saurav Kadavath, Mantas Mazeika, Akul Arora, Ethan Guo, Collin Burns, Samir Puranik, Horace He, Dawn Song, and Jacob Steinhardt. Measuring coding challenge competence with apps. NeurIPS, 2021a. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021b. + +Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. TaPas: Weakly supervised table parsing via pre-training. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4320-4333, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.398. URL https://aclanthology.org/2020.acl-main.398/. +Keith J Holyoak. 
URL https://aclanthology.org/2023.findings-acl.67/.
Livecodebench: Holistic and contamination free evaluation of large language models for code.
+Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. StructGPT: A general framework for large language model to reason over structured data. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 9237-9251, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.574. URL https://aclanthology.org/2023.emnlp-main.574/. +Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024b. +Fangkai Jiao, Chengwei Qin, Zhengyuan Liu, Nancy Chen, and Shafiq Joty. Learning planning-based reasoning by trajectories collection and process reward synthesizing. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 334-350. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.20. +Fangkai Jiao, Zhiyang Teng, Bosheng Ding, Zhengyuan Liu, Nancy F. Chen, and Shafiq Joty. Exploring self-supervised logic-enhanced training for large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 926-941. Association for Computational Linguistics, 2024b. +Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F. Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. In ICLR. OpenReview.net, 2025. +Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. 
Swe-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=VTF8yNQM66. +Jaehun Jung, Lianhui Qin, Sean Welleck, Faeze Brahman, Chandra Bhagavatula, Ronan Le Bras, and Yejin Choi. Maieutic prompting: Logically consistent reasoning with recursive explanations. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1266-1279, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.82. URL https://aclanthology.org/2022.emnlp-main.82/. +Jaehun Jung, Faeze Brahman, and Yejin Choi. Trust or escalate: Llm judges with provable guarantees for human agreement. arXiv preprint arXiv:2407.18370, 2024. +Katie Kang, Amrith Setlur, Dibya Ghosh, Jacob Steinhardt, Claire Tomlin, Sergey Levine, and Aviral Kumar. What do learning dynamics reveal about generalization in llm reasoning?, 2024. URL https://arxiv.org/abs/2411.07681. +Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024. +Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024. + +Zixuan Ke and Bing Liu. Continual learning of natural language processing tasks: A survey, 2023. URL https://arxiv.org/abs/2211.12701. +Zixuan Ke, Yijia Shao, Haowei Lin, Tatsuya Konishi, Gyuhak Kim, and Bing Liu. Continual pre-training of language models, 2023. URL https://arxiv.org/abs/2302.03241. +Zixuan Ke, Weize Kong, Cheng Li, Mingyang Zhang, Qiaozhu Mei, and Michael Bendersky. 
Bridging the preference gap between retrievers and llms, 2024. URL https://arxiv.org/abs/2401.06954. +Zixuan Ke, Yifei Ming, and Shafiq Joty. Adaptation of large language models. In Maria Lomeli, Swabha Swayamdipta, and Rui Zhang (eds.), Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 5: Tutorial Abstracts), pp. 30-37, Albuquerque, New Mexico, May 2025a. Association for Computational Linguistics. ISBN 979-8-89176-193-3. doi: 10.18653/v1/2025.naacl-tutorial.5. URL https://aclanthology.org/2025.naacl-tutorial.5/. +Zixuan Ke, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Demystifying domain-adaptive post-training for financial llms. arXiv preprint arXiv:2501.04961, 2025b. +Zixuan Ke, Austin Xu, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Mas-zero: Designing multi-agent systems with zero supervision, 2025c. URL https://arxiv.org/abs/2505.14996. +Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024, 2022. +Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=nGgzQjzaRy. +Dongkwan Kim, Junho Myung, and Alice Oh. Salad-bowl-LLM: Multi-culture LLMs by in-context demonstrations from diverse cultures. In Workshop on Socially Responsible Language Modelling Research, 2024a. URL https://openreview.net/forum?id=KsAfPGPZZn. +Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangdoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, et al. 
Prometheus: Inducing fine-grained evaluation capability in language models. In The Twelfth International Conference on Learning Representations, 2023. +Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The biggen bench: A principled benchmark for fine-grained evaluation of language models with language models. arXiv preprint arXiv:2406.05761, 2024b. +Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. arXiv preprint arXiv:2405.01535, 2024c. +Sunghwan Kim, Dongjin Kang, Taeyoon Kwon, Hyungjoo Chae, Jungsoo Won, Dongha Lee, and Jinyoung Yeo. Evaluating robustness of reward models for mathematical reasoning, 2024d. URL https://arxiv.org/abs/2410.01729. +Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35:22199-22213, 2022. +Wouter Kool, Herke van Hoof, and Max Welling. Buy 4 reinforce samples, get a baseline for free! 2019. +Michal Kosinski. Evaluating large language models in theory of mind tasks. Proceedings of the National Academy of Sciences, 121(45):e2405460121, 2024. + +Julia Kreutzer, Artem Sokolov, and Stefan Riezler. Bandit structured prediction for neural sequence-to-sequence learning. arXiv preprint arXiv:1704.06497, 2017. +Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D. Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M. Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal M. P. Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. CoRR, abs/2409.12917, 2024. doi: 10.48550/ARXIV.2409.12917. 
Itay Levy, Ben Bogin, and Jonathan Berant.
arXiv preprint arXiv:2312.04474, 2023a. +Haoran Li, Qingxiu Dong, Zhengyang Tang, Chaojun Wang, Xingxing Zhang, Haoyang Huang, Shaohan Huang, Xiaolong Huang, Zeqiang Huang, Dongdong Zhang, Yuxian Gu, Xin Cheng, Xun Wang, Si-Qing Chen, Li Dong, Wei Lu, Zhifang Sui, Benyou Wang, Wai Lam, and Furu Wei. Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064, 2024a. URL https://doi.org/10.48550/arXiv.2402.13064. +Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, Hai Zhao, and Pengfei Liu. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470, 2023b. +Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. More agents is all you need. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=bgzUSZ8aeg. +Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024c. +Minzhi Li, Weiyan Shi, Caleb Ziems, and Diyi Yang. Social intelligence data infrastructure: Structuring the present and navigating the future. arXiv preprint arXiv:2403.14659, 2024d. + +Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pp. 2277-2290, 2025a. +Mukai Li, Shansan Gong, Jiangtao Feng, Yiheng Xu, Jun Zhang, Zhiyong Wu, and Lingpeng Kong. Incontext learning with many demonstration examples. arXiv preprint arXiv:2302.04931, 2023c. +Ruosen Li, Teerth Patel, and Xinya Du. Prd: Peer rank and discussion improve large language model based evaluations. arXiv preprint arXiv:2307.02762, 2023d. +Sheng Li, Jayesh K Gupta, Peter Morales, Ross Allen, and Mykel J Kochenderfer. Deep implicit coordination graphs for multi-agent reinforcement learning. 
arXiv preprint arXiv:2006.11438, 2020. +Xiaonan Li, Kai Lv, Hang Yan, Tianyang Lin, Wei Zhu, Yuan Ni, Guotong Xie, Xiaoling Wang, and Xipeng Qiu. Unified demonstration retriever for in-context learning. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4644-4668, Toronto, Canada, July 2023e. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.256. URL https://aclanthology.org/2023.acl-long.256/. +Xingxuan Li, Ruochen Zhao, Yew Ken Chia, Bosheng Ding, Shafiq Joty, Soujanya Poria, and Lidong Bing. Chain-of-knowledge: Grounding large language models via dynamic knowledge adapting over heterogeneous sources, 2024e. URL https://arxiv.org/abs/2305.13269. +Yang Li, Wenhao Zhang, Jianhong Wang, Shao Zhang, Yali Du, Ying Wen, and Wei Pan. Aligning individual and collective objectives in multi-agent cooperation. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024f. URL https://openreview.net/forum?id=2YSHEBRRol. +Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel J. Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. Science, 378(6624):1092-1097, December 2022. ISSN 1095-9203. doi: 10.1126/science.abq1158. URL http://dx.doi.org/10.1126/science.abq1158. +Zenan Li, Zhaoyu Li, Wen Tang, Xian Zhang, Yuan Yao, Xujie Si, Fan Yang, Kaiyu Yang, and Xiaoxing Ma. Proving olympiad inequalities by synergizing LLMs and symbolic reasoning. In The Thirteenth International Conference on Learning Representations, 2025b. 
URL https://openreview.net/forum?id=FiyS0ecSm0. +Zhaoyi Li, Gangwei Jiang, Hong Xie, Linqi Song, Defu Lian, and Ying Wei. Understanding and patching compositional reasoning in LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 9668-9688, Bangkok, Thailand, August 2024g. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.576. URL https://aclanthology.org/2024-findings-acl.576/. +Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, 2024h. URL https://openreview.net/forum?id=3EWTEy9MTM. +Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, + +Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. Holistic evaluation of language models. Transactions on Machine Learning Research, 2023a. ISSN 2835-8856. URL https://openreview.net/forum?id=i04LZibEqW. Featured Certification, Expert Certification. +Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multi-agent debate. arXiv preprint arXiv:2305.19118, 2023b. 
+Yancheng Liang, Daphne Chen, Abhishek Gupta, Simon Shaolei Du, and Natasha Jaques. Learning to cooperate with humans using generative agents. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=v4dXL3LsGX. +Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=v8LOpN6EOi. +Bill Yuchen Lin, Seyeon Lee, Xiaoyang Qiao, and Xiang Ren. Common sense beyond English: Evaluating and improving multilingual language models for commonsense reasoning. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 1274-1287, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.102. URL https://aclanthology.org/2021.acl-long.102/. +Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. An inner table retriever for robust table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9909–9926, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.551. URL https://aclanthology.org/2023.acl-long.551/. +Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. LI-RAGE: Late interaction retrieval augmented generation with explicit signals for open-domain table question answering. 
In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 1557-1566, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-short.133. URL https://aclanthology.org/2023.acl-short.133/. +Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024a. +Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025a. +Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. What makes good in-context examples for GPT-3? In Eneko Agirre, Marianna Apidianaki, and Ivan Vulić (eds.), Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pp. 100–114, Dublin, Ireland and Online, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.deelio-1.10. URL https://aclanthology.org/2022.deelio-1.10/. +Liang Liu, Dong Zhang, Shoushan Li, Guodong Zhou, and Erik Cambria. Two heads are better than one: Zero-shot cognitive reasoning via multi-llm knowledge fusion. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, pp. 1462–1472, 2024b. +Ryan Liu, Jiayi Geng, Addison J. Wu, Ilia Sucholutsky, Tania Lombrozo, and Thomas L. Griffiths. Mind your step (by step): Chain-of-thought can reduce performance on tasks where thinking makes humans worse, 2024c. URL https://arxiv.org/abs/2410.21333. + +Tianyang Liu, Fei Wang, and Muhao Chen. Rethinking tabular data understanding with large language models. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 450-482, Mexico City, Mexico, June 2024d. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.26. URL https://aclanthology.org/2024.naacl-long.26/. +Tongxuan Liu, Xingyu Wang, Weizhe Huang, Wenjiang Xu, Yuting Zeng, Lei Jiang, Hailong Yang, and Jing Li. Groupdebate: Enhancing the efficiency of multi-agent debate using group discussion. arXiv preprint arXiv:2409.14051, 2024e. +Yanchen Liu, Srishti Gautam, Jiaqi Ma, and Himabindu Lakkaraju. Investigating the fairness of large language models for predictions on tabular data. In *Socially Responsible Language Modelling Research*, 2023. URL https://openreview.net/forum?id=V1740FqidS. +Yantao Liu, Zijun Yao, Rui Min, Yixin Cao, Lei Hou, and Juanzi Li. Pairwise rm: Perform best-of-n sampling with knockout tournament. arXiv preprint arXiv:2501.13007, 2025b. +Zhihan Liu, Hao Hu, Shenao Zhang, Hongyi Guo, Shuqi Ke, Boyi Liu, and Zhaoran Wang. Reason for future, act for now: A principled architecture for autonomous LLM agents. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 31186-31261. PMLR, 21-27 Jul 2024f. +Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be a hah moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025c. Notion Blog. +Do Xuan Long, Hai Nguyen Ngoc, Tiviatis Sim, Hieu Dao, Shafiq Joty, Kenji Kawaguchi, Nancy F Chen, and Min-Yen Kan. Llms are biased towards output formats! systematically evaluating and mitigating output format bias of llms. arXiv preprint arXiv:2408.08656, 2024a. +Do Xuan Long, Duong Ngoc Yen, Anh Tuan Luu, Kenji Kawaguchi, Min-Yen Kan, and Nancy F. Chen. 
Multi-expert prompting improves reliability, safety and usefulness of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 20370-20401, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.1135. URL https://aclanthology.org/2024.emnlp-main.1135/. +Do Xuan Long, Yiran Zhao, Hannah Brown, Yuxi Xie, James Zhao, Nancy Chen, Kenji Kawaguchi, Michael Shieh, and Junxian He. Prompt optimization via adversarial in-context learning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7308-7327, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.395. URL https://aclanthology.org/2024.acl-long.395/. +Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36, 2024a. +Weizheng Lu, Jing Zhang, Ju Fan, Zihao Fu, Yueguo Chen, and Xiaoyong Du. Large language model for table processing: A survey. Frontiers of Computer Science, 19(2):192350, 2025. +Xinyuan Lu, Liangming Pan, Yubo Ma, Preslav Nakov, and Min-Yen Kan. Tart: An open-source tool-augmented framework for explainable table-based reasoning. arXiv preprint arXiv:2409.11724, 2024b. +Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023a. + +Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. 
Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. CoRR, abs/2308.09583, 2023b. doi: 10.48550/ARXIV.2308.09583. URL https://doi.org/10.48550/arXiv.2308.09583. +Kangyang Luo, Zichen Ding, Zhenmin Weng, Lingfeng Qiao, Meng Zhao, Xiang Li, Di Yin, and Jinlong Shu. Let's be self-generated via step by step: A curriculum learning approach to automated reasoning with large language models. arXiv preprint arXiv:2410.21728, 2024a. URL https://arxiv.org/abs/2410.21728. +Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. CoRR, abs/2406.06592, 2024b. doi: 10.48550/ARXIV.2406.06592. URL https://doi.org/10.48550/arXiv.2406.06592. +Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024c. +Man Luo, Xin Xu, Zhuyun Dai, Panupong Pasupat, Mehran Kazemi, Chitta Baral, Vaiva Imbrasaite, and Vincent Y Zhao. Dr. icl: Demonstration-retrieved in-context learning. arXiv preprint arXiv:2305.14128, 2023c. +Man Luo, Xin Xu, Yue Liu, Panupong Pasupat, and Mehran Kazemi. In-context learning with retrieved demonstrations for language models: A survey. Transactions on Machine Learning Research, 2024d. ISSN 2835-8856. URL https://openreview.net/forum?id=NQPo8ZhQPa. Survey Certification. +Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. 
Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi (eds.), Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/. +Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, and Aixin Sun. Sciagent: Tool-augmented language models for scientific reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 15701-15736. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.880. +Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, Aixin Sun, Hany Awadalla, et al. Sciagent: Tool-augmented language models for scientific reasoning. arXiv preprint arXiv:2402.11451, 2024b. +Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1448-1535, Singapore, December 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.101. URL https://aclanthology.org/2023-findings-emnlp.101/. +Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. 
In Thirty-seventh Conference on Neural Information Processing Systems, 2023b. URL https://openreview.net/forum?id=S37h0erQLB. + +Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36, 2024. +Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024. +XTX Markets. AIMO Progress Prize: July 2024 results. https://aimoprize.com/updates/2024-07-20-progress-prize-results, 2024. +Tula Masterman, Sandi Besen, Mason Sawtell, and Alex Chao. The landscape of emerging ai agent architectures for reasoning, planning, and tool calling: A survey, 2024. URL https://arxiv.org/abs/2404.11584. +Marco Matta, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, M Re, F Silvestri, and S Spanò. Q-rts: a real-time swarm intelligence based on multi-agent q-learning. _Electronics Letters_, 55(10):589–591, 2019. +Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024. +Raja Sekhar Reddy Mekala, Yasaman Razeghi, and Sameer Singh. EchoPrompt: Instructing the model to rephrase queries for improved in-context learning. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 399-432, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-short.35. URL https://aclanthology.org/2024.naacl-short.35. +Yu Meng, Mengzhou Xia, and Danqi Chen. 
Simpo: Simple preference optimization with a reference-free reward. arXiv preprint arXiv:2405.14734, 2024. +William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, 2024. URL https://openreview.net/forum?id=NjNGLPh8Wh. +Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024. +Yifei Ming, Senthil Purushwalkam, Shrey Pandit, Zixuan Ke, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Faitheval: Can your language model stay faithful to context, even if "the moon is made of marshmallows". In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=UeVx6L59fg. +Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AjXkRZIvjb. +Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3470-3487, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.244. URL https://aclanthology.org/2022.acl-long.244/. +Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models - a survey. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Lmjgl2n11u. 
+ +Nieves Montes, Michael Luck, Nardine Osman, Odinaldo Rodrigues, and Carles Sierra. Combining theory of mind and abductive reasoning in agent-oriented programming. Autonomous Agents and Multi-Agent Systems, 37(2):36, 2023. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. +Md Mahadi Hasan Nahid and Davood Rafiei. NormTab: Improving symbolic reasoning in LLMs through tabular data normalization. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 3569-3585, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.203. URL https://aclanthology.org/2024 findings-emnlp.203/. +Allen Newell, John C Shaw, and Herbert A Simon. Report on a general problem solving program. In IFIP congress, volume 256, pp. 64. Pittsburgh, PA, 1959. +Allen Newell, Herbert Alexander Simon, et al. Human problem solving, volume 104. Prentice-hall Englewood Cliffs, NJ, 1972. +Khanh Nguyen, Hal Daumé III, and Jordan Boyd-Graber. Reinforcement learning for bandit neural machine translation with simulated human feedback. arXiv preprint arXiv:1707.07402, 2017. +Ansong Ni, Miltiadis Allamanis, Arman Cohan, Yinlin Deng, Kensen Shi, Charles Sutton, and Pengcheng Yin. Next: Teaching large language models to reason about code execution. In ICML, 2024. URL https://openreview.net/forum?id=B1W712hMBi. +Tobias Nipkow, Markus Wenzel, and Lawrence C Paulson. Isabelle/HOL: a proof assistant for higher-order logic. 2002. +NovaSky Team. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025. Accessed: 2025-01-09. 
+Maxwell Nye, Anders Andreassen, Guy Gur-Ari, Henryk Witold Michalewski, Jacob Austin, David Bieber, David Martin Dohan, Aitor Lewkowycz, Maarten Paul Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2021. https://arxiv.org/abs/2112.00114. +Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma, Tom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al. In-context learning and induction heads. arXiv preprint arXiv:2209.11895, 2022. +OpenAI. Introducing gpt-4.5. https://openai.com/index/introducing-gpt-4-5/, 2025. +OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam 
Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason + +Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quñonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan, 
Thibault Sottiaux, Thomas Degry, Thomas Dimson, Tianhao Zheng, Timur Garipov, Tom Stasi, Trapit Bansal, Trevor Creech, Troy Peterson, Tyna Eloundou, Valerie Qi, Vineet Kosaraju, Vinnie Monaco, Vitchyr Pong, Vlad Fomenko, Weiyi Zheng, Wenda Zhou, Wes McCabe, Wojciech Zaremba, Yann Dubois, Yinghai Lu, Yining Chen, Young Cha, Yu Bai, Yuchen He, Yuchen Zhang, Yunyun Wang, Zheng Shao and Zhuohan Li. Openai o1 system card 2024. URL https://arxiv.org/abs/2412.16720. + +OpenAI, :, Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, Jerry Tworek, Lorenz Kuhn, Lukasz Kaiser, Mark Chen, Max Schwarzer, Mostafa Rohaninejad, Nat McAleese, o3 contributors, Oleg Mürk, Rhythm Garg, Rui Shu, Szymon Sidor, Vineet Kosaraju, and Wenda Zhou. Competitive programming with large reasoning models, 2025. URL https://arxiv.org/abs/2502.06807. +Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022. URL https://arxiv.org/abs/2203.02155. +Bo Pan, Jiaying Lu, Ke Wang, Li Zheng, Zhen Wen, Yingchaojie Feng, Minfeng Zhu, and Wei Chen. Agent-coord: Visually exploring coordination strategy for llm-based multi-agent collaboration. arXiv preprint arXiv:2404.11943, 2024a. +Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.248. 
URL https://aclanthology.org/2023-findings-emnlp.248/. +Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse automated correction strategies. Transactions of the Association for Computational Linguistics, 12:484-506, 2024b. doi: 10.1162/tacl_a_00660. URL https://aclanthology.org/2024.tacl-1.27/. +Bhargavi Paranjape, Julian Michael, Marjan Ghazvininejad, Hannaneh Hajishirzi, and Luke Zettlemoyer. Prompting contrastive explanations for commonsense reasoning tasks. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pp. 4179-4192, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021-findings-acl.366. URL https://aclanthology.org/2021-findings-acl.366/. + +Remo Pareschi. Abductive reasoning with the gpt-4 language model: Case studies from criminal investigation, medical practice, scientific research. _Sistema intelligenti_, 35(2):435-444, 2023. +John Arthur Passmore. Philosophical reasoning. 1961. +Pouya Pezeshkpour, Eser Kandogan, Nikita Bhutani, Sajjadur Rahman, Tom Mitchell, and Estevam Hruschka. Reasoning capacity in multi-agent systems: Limitations, challenges and human-centered solutions, 2024. URL https://arxiv.org/abs/2402.01108. +Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrs2T16. +Mohammadreza Pourreza and Davood Rafiei. DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=p53QDxSIc5. +Ben Prystawski, Michael Li, and Noah D. Goodman. Why think step by step? reasoning emerges from the locality of experience. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/e0af79ad53a336b4c4b4f7e2a68eb609-Abstract-Conference.html. +Reid Pryzant, Dan Iter, Jerry Li, Yin Lee, Chenguang Zhu, and Michael Zeng. Automatic prompt optimization with "gradient descent" and beam search. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 7957-7968, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.494. URL https://aclanthology.org/2023.emnlp-main.494/. +Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024. +Zhenting Qi, Hongyin Luo, Xuliang Huang, Zhuokai Zhao, Yibo Jiang, Xiangjun Fan, Himabindu Lakkaraju, and James Glass. Quantifying generalization complexity for large language models, 2024. URL https://arxiv.org/abs/2410.01769. +Shuofei Qiao, Honghao Gui, Chengfei Lv, Qianghuai Jia, Huajun Chen, and Ningyu Zhang. Making language models better tool learners with execution feedback. arXiv preprint arXiv:2305.13068, 2023a. +Shuofei Qiao, Yixin Ou, Ningyu Zhang, Xiang Chen, Yunzhi Yao, Shumin Deng, Chuanqi Tan, Fei Huang, and Huajun Chen. Reasoning with language model prompting: A survey. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5368-5393, Toronto, Canada, July 2023b. URL https://aclanthology.org/2023.acl-long.294/. +Chengwei Qin, Wenhan Xia, Tan Wang, Fangkai Jiao, Yuchen Hu, Bosheng Ding, Ruirui Chen, and Shafiq Joty. 
Relevant or random: Can llms truly perform analogical reasoning? ACL-Findings, 2025. URL https://arxiv.org/abs/2404.12728. +Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982. +Xihe Qiu, Haoyu Wang, Xiaoyu Tan, Chao Qu, Yujie Xiong, Yuan Cheng, Yinghui Xu, Wei Chu, and Yuan Qi. Towards collaborative intelligence: Propagating intentions and reasoning for multi-agent coordination with large language models, 2024. URL https://arxiv.org/abs/2407.12532. + +Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=DRC9pZwBwR. +Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. arXiv preprint arXiv:2407.18219, 2024b. +Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D. Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html. +Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. Explain yourself! leveraging language models for commonsense reasoning. arXiv preprint arXiv:1906.02361, 2019. +Shyam Sundhar Ramesh, Yifan Hu, Iason Chaimalas, Viraj Mehta, Pier Giuseppe Sessa, Haitham Bou Ammar, and Ilija Bogunovic. 
Group robust preference optimization in reward-free rlhf. arXiv preprint arXiv:2405.20304, 2024. +Jingqing Ruan, Yali Du, Xuantang Xiong, Dengpeng Xing, Xiyun Li, Linghui Meng, Haifeng Zhang, Jun Wang, and Bo Xu. Gcs: Graph-based coordination strategy for multi-agent reinforcement learning. arXiv preprint arXiv:2201.06257, 2022. +Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. In Marine Carpuat, Marie-Catherine de Marneffé, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655-2671, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.191. URL https://aclanthology.org/2022.naacl-main.191/. +Stuart Russell and Peter Norvig. Artificial Intelligence: A Modern Approach. Prentice Hall, 3 edition, 2010. +Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024. +Amir Saeidi, Shivanshu Verma, Aswin RRV, and Chitta Baral. Triple preference optimization: Achieving better alignment with less data in a single step optimization. arXiv preprint arXiv:2405.16681, 2024. 
+Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M Rush. Multitask prompted training enables zero-shot task generalization. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=9Vrb9D0WI4. +Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V. +William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv preprint arXiv:2206.05802, 2022. +Erik Schluntz and Barry Zhang. Building effective agents. https://www.anthropic.com/, Dec 2024. URL https://www.anthropic.com/research/building-effective-agents. + +John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. Quantifying language models' sensitivity to spurious features in prompt design or: How i learned to start worrying about prompt formatting. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RIu51yNXjT. +S Seals and Valerie Shalin. Evaluating the deductive competence of large language models. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8614-8630, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.476. URL https://aclanthology.org/2024.naacl-long.476/. +H Seo and D Lee. Reinforcement learning and strategic reasoning during social decision-making. In Decision Neuroscience, pp. 225-231. Elsevier, 2017. +Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shahriari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. BOND: aligning llms with best-of-n distillation. CoRR, abs/2407.14622, 2024. URL https://doi.org/10.48550/arXiv.2407.14622. +Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning. arXiv preprint arXiv:2410.08146, 2024a. +Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. CoRR, abs/2410.08146, 2024b. doi: 10.48550/ARXIV.2410.08146. URL https://doi.org/10.48550/arXiv.2410.08146. +Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role play with large language models. Nature, 623 (7987):493-498, 2023a. +Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role-play with large language models, 2023b. URL https://arxiv.org/abs/2305.16367. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 
Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +Zhengliang Shi, Weiwei Sun, Shen Gao, Pengjie Ren, Zhumin Chen, and Zhaochun Ren. Generate-then-ground in retrieval-augmented generation for multi-hop question answering. arXiv preprint arXiv:2406.14891, 2024. +Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024. +Kumar Shridhar, Koustuv Sinha, Andrew Cohen, Tianlu Wang, Ping Yu, Ramakanth Pasunuru, Mrinmaya Sachan, Jason Weston, and Asli Celikyilmaz. The art of llm refinement: Ask, refine, and trust. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5872-5883, 2024. +Chenglei Si, Zhe Gan, Zhengyuan Yang, Shuohang Wang, Jianfeng Wang, Jordan Lee Boyd-Graber, and Lijuan Wang. Prompting GPT-3 to be reliable. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=98p5x51L5af. + +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling LLM test-time compute optimally can be more effective than scaling model parameters. CoRR, abs/2408.03314, 2024. doi: 10.48550/ARXIV.2408.03314. URL https://doi.org/10.48550/arXiv.2408.03314. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling test-time compute optimally can be more effective than scaling LLM parameters. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=4FWAwZtd2n. +Yifan Song, Weimin Xiong, Xiutian Zhao, Dawei Zhu, Wenhao Wu, Ke Wang, Cheng Li, Wei Peng, and Sujian Li. Agentbank: Towards generalized llm agents via fine-tuning on $50000+$ interaction trajectories. arXiv preprint arXiv:2410.07706, 2024. 
+Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024a. URL https://arxiv.org/pdf/2409.12183. +Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning, 2024b. URL https://arxiv.org/abs/2409.12183. +Keith E Stanovich and Richard F West. Individual differences in reasoning: Implications for the rationality debate? Behavioral and Brain Sciences, 23(5):645-665, 2000. +Kaya Stechly, Matthew Marquez, and Subbarao Kambhampati. Gpt-4 doesn't know it's wrong: An analysis of iterative prompting for reasoning problems. arXiv preprint arXiv:2310.12397, 2023. +Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. arXiv preprint arXiv:2402.08115, 2024. +Nisan Stiennon, Long Ouyang, Jeff Wu, Daniel M. Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul Christiano. Learning to summarize from human feedback. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20, Red Hook, NY, USA, 2020. Curran Associates Inc. ISBN 9781713829546. +Benedikt Stroebl, Sayash Kapoor, and Arvind Narayanan. Inference Scaling fLaws: The Limits of LLM Resampling with Imperfect Verifiers. arXiv preprint arXiv:2411.17501, 2024. +Vighnesh Subramaniam, Yilun Du, Joshua B Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains. arXiv preprint arXiv:2501.05707, 2025. +Yuan Sui, Mengyu Zhou, Mingjie Zhou, Shi Han, and Dongmei Zhang. 
Table meets llm: Can large language models understand structured table data? a benchmark and empirical study. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, WSDM '24, pp. 645-654, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 9798400703713. doi: 10.1145/3616855.3635752. URL https://doi.org/10.1145/3616855.3635752. +Sainbayar Sukhbaatar, Rob Fergus, et al. Learning multiagent communication with backpropagation. Advances in neural information processing systems, 29, 2016. +Theodore R. Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas L. Griffiths. Cognitive architectures for language agents, 2024. URL https://arxiv.org/abs/2309.02427. +Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. arXiv preprint arXiv:2307.07697, 2023. + +Jiaxing Sun, Weiquan Huang, Jiang Wu, Chenya Gu, Wei Li, Songyang Zhang, Hang Yan, and Conghui He. Benchmarking Chinese commonsense reasoning of LLMs: From Chinese-specifics to reasoning-memorization correlations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 11205-11228, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.604. URL https://aclanthology.org/2024.acl-long.604/. +Shichao Sun, Junlong Li, Weizhe Yuan, Ruifeng Yuan, Wenjie Li, and Pengfei Liu. The critique of critique. arXiv preprint arXiv:2401.04518, 2024b. +Zhiqing Sun, Longhui Yu, Yikang Shen, Weiyang Liu, Yiming Yang, Sean Welleck, and Chuang Gan. Easy-to-hard generalization: Scalable alignment beyond human supervision. CoRR, abs/2403.09472, 2024c. doi: 10.48550/ARXIV.2403.09472. URL https://doi.org/10.48550/arXiv.2403.09472. +Richard S Sutton. Reinforcement learning: An introduction. 
A Bradford Book, 2018. +Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024a. +Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding, 2024b. URL https://arxiv.org/abs/2401.12954. +Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024. +Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. Mathscale: Scaling instruction tuning for mathematical reasoning. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Kjww7ZN47M. +Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, and Junyang Lin. Enabling scalable oversight via self-evolving critic, 2025. URL https://arxiv.org/abs/2501.05727. +Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023. 
+Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, Chuning Tang, Congcong Wang, Dehao Zhang, Enming Yuan, Enzhe Lu, Fengxiang Tang, Flood Sung, Guangda Wei, Guokun Lai, Haiqing Guo, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haotian Yao, Haotian Zhao, Haoyu Lu, Haoze Li, Haozhen Yu, Hongcheng Gao, Huabin Zheng, Huan Yuan, Jia Chen, Jianhang Guo, Jianlin Su, Jianzhou Wang, Jie Zhao, Jin Zhang, Jingyuan Liu, Junjie Yan, Junyan Wu, Lidong Shi, Ling Ye, Longhui Yu, Mengnan Dong, Neo Zhang, Ningchen Ma, Qiwei Pan, Qucheng Gong, Shaowei Liu, Shengling Ma, Shupeng Wei, Sihan Cao, Siying Huang, Tao Jiang, Weihao Gao, Weimin Xiong, Weiran He, Weixiao Huang, Wenhao Wu, Wenyang He, Xianghui Wei, Xianqing Jia, Xingzhe Wu, Xinran Xu, Xinxing Zu, Xinyu Zhou, Xuehai Pan, Y. Charles, Yang Li, Yangyang Hu, Yangyang Liu, Yanru Chen, Yejie Wang, Yibo Liu, Yidao Qin, Yifeng Liu, Ying Yang, Yiping Bao, Yulun Du, Yuxin Wu, Yuzhi Wang, Zaida Zhou, Zhaoji Wang, Zhaowei Li, Zhen Zhu, Zheng Zhang, Zhexu Wang, Zhilin Yang, Zhiqi Huang, Ziyao Xu, and Zonghan Yang. Kimi k1.5: Scaling reinforcement learning with llms, 2025. URL https://arxiv.org/abs/2501.12599. +Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/. +Amitayush Thakur, George Tsoukalas, Yeming Wen, Jimmy Xin, and Swarat Chaudhuri. An in-context learning agent for formal theorem-proving. In Conference on Language Modeling (COLM), 2024. + +The Coq Development Team. The Coq Proof Assistant. 2024. URL https://coq.inria.fr/doc/V8.20.0/refman/index.html. Version 8.20.0. +Qingyuan Tian, Hanlun Zhu, Lei Wang, Yang Li, and Yunshi Lan. $\mathbf{R}^3$ prompting: Review, rephrase and resolve for chain-of-thought reasoning in large language models under noisy context. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 
1670-1685, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.114. URL https://aclanthology.org/2023.findings-emnlp.114/. +Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. arXiv preprint arXiv:2404.12253, 2024. +Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. CoRR, abs/2407.13690, 2024. doi: 10.48550/ARXIV.2407.13690. URL https://doi.org/10.48550/arXiv.2407.13690. +Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. +Vince Trencsenyi, Agnieszka Mensfelt, and Kostas Stathis. Approximating human strategic reasoning with llm-enhanced recursive reasoners leveraging multi-agent hypergames. arXiv preprint arXiv:2502.07443, 2025. +Trieu H Trinh, Yuhuai Wu, Quoc V Le, He He, and Thang Luong. Solving olympiad geometry without human demonstrations. Nature, 2024. +Prapti Trivedi, Aditya Gulati, Oliver Molenschot, Meghana Arakkal Rajeev, Rajkumar Ramamurthy, Keith Stevens, Tanveesh Singh Chaudhery, Jahnavi Jambholkar, James Zou, and Nazneen Rajani. Self-rationalization improves llm as a fine-grained judge. arXiv preprint arXiv:2410.05495, 2024. +Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process-and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022. +Karthik Valmeekam, Matthew Marquez, and Subbarao Kambhampati. Can large language models really improve by self-critiquing their own plans? arXiv preprint arXiv:2310.08118, 2023. 
+Pat Verga, Sebastian Hofstatter, Sophia Althammer, Yixuan Su, Aleksandra Piktus, Arkady Arkhangorodsky, Minjie Xu, Naomi White, and Patrick Lewis. Replacing judges with juries: Evaluating llm generations with a panel of diverse models. arXiv preprint arXiv:2404.18796, 2024. +Johannes Von Oswald, Eyvind Niklasson, Ettore Randazzo, Joao Sacramento, Alexander Mordvintsev, Andrey Zhmoginov, and Max Vladymyrov. Transformers learn in-context by gradient descent. In International Conference on Machine Learning, pp. 35151-35174. PMLR, 2023. +Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autoraters: Taming large language models for better automatic evaluation. arXiv preprint arXiv:2407.10817, 2024. +Xingchen Wan, Ruoxi Sun, Hootan Nakhost, and Sercan O Arik. Teach better or show smarter? on instructions and exemplars in automatic prompt optimization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=IdtoJVWvNx. +Yuxuan Wan, Wenxuan Wang, Yiliu Yang, Youliang Yuan, Jen-tse Huang, Pinjia He, Wenxiang Jiao, and Michael Lyu. LogicAsker: Evaluating and improving the logical reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 2124-2155, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.128. URL https://aclanthology.org/2024.emnlp-main.128/. + +Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In _Forty-first International Conference on Machine Learning_, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024c. URL https://openreview.net/forum?id=C4OpREezgj. 
+Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/. +Han Wang, Archiki Prasad, Elias Stengel-Eskin, and Mohit Bansal. Soft self-consistency improves language model agents. arXiv preprint arXiv:2402.13212, 2024a. +Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Yixuan Li, and Neel Joshi. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. In The Thirty-Eighth Annual Conference on Neural Information Processing Systems, 2024b. +Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities, 2024c. URL https://arxiv.org/abs/2406.04692. +Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Jirong Wen. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6), March 2024d. ISSN 2095-2236. doi: 10.1007/s11704-024-40231-1. URL http://dx.doi.org/10.1007/s11704-024-40231-1. +Liang Wang, Nan Yang, and Furu Wei. Learning to retrieve in-context examples for large language models. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1752-1767, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.105/. 
+Peifeng Wang, Zhengyang Wang, Zheng Li, Yifan Gao, Bing Yin, and Xiang Ren. SCOTT: Self-consistent chain-of-thought distillation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5546-5558, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.304. URL https://aclanthology.org/2023.acl-long.304/. +Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024f. +Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9426-9439. Association for Computational Linguistics, 2024g. URL https://doi.org/10.18653/v1/2024.acl-long.510. +Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of llm reasoning: Are multi-agent discussions the key?, 2024h. URL https://arxiv.org/abs/2402.18272. +Song Wang, Zihan Chen, Chengshuai Shi, Cong Shen, and Jundong Li. Mixture of demonstrations for in-context learning. Advances in Neural Information Processing Systems, 37:88091-88116, 2024i. +Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023c. + +Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024j. 
+Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. CoRR, abs/2310.05707, 2023d. doi: 10.48550/ARXIV.2310.05707. URL https://doi.org/10.48550/arXiv.2310.05707. +Xinyi Wang, Wanrong Zhu, Michael Saxon, Mark Steyvers, and William Yang Wang. Large language models are latent variable models: Explaining and finding good demonstrations for in-context learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023e. URL https://openreview.net/forum?id=BGvkwZEGt7. +Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. arXiv preprint arXiv:2402.10200, 2024. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023f. URL https://openreview.net/forum?id=1PL1NIMMrw. +Yidong Wang, Zhuohao Yu, Wenjin Yao, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, et al. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. In The Twelfth International Conference on Learning Representations, 2023g. +Yuqing Wang and Yun Zhao. Rupbench: Benchmarking reasoning under perturbations for robustness evaluation in large language models. arXiv preprint arXiv:2406.11020, 2024. +Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. In Findings of the Association for Computational Linguistics: EMNLP 2024, Miami, Florida, USA, November 12-16, 2024, pp. 7309-7319. Association for Computational Linguistics, 2024k. URL https://aclanthology.org/2024.findings-emnlp.429. +Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. 
Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024l. +Zihao Wang, Anji Liu, Haowei Lin, Jiaqi Li, Xiaojian Ma, and Yitao Liang. Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313, 2024m. +Zilong Wang, Hao Zhang, Chun-Liang Li, Julian Martin Eisenschlos, Vincent Perot, Zifeng Wang, Lesly Miculicich, Yasuhisa Fujii, Jingbo Shang, Chen-Yu Lee, and Tomas Pfister. Chain-of-table: Evolving tables in the reasoning chain for table understanding. In The Twelfth International Conference on Learning Representations, 2024n. URL https://openreview.net/forum?id=4L0xnS4GQM. +Peter Cathcart Wason and Philip Nicholas Johnson-Laird. Psychology of reasoning: Structure and content. Harvard University Press, 86, 1972. +Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682, 2022a. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022b. + +Yuxiang Wei, Zhe Wang, Jiawei Liu, Yifeng Ding, and Lingming Zhang. Magicoder: Empowering code generation with OSS-instruct. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 52632-52657. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wei24h.html. +Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution, 2025. URL https://arxiv.org/abs/2502.18449. 
+Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. arXiv preprint arXiv:2405.16337, 2024. +Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. arXiv preprint arXiv:2406.16838, 2024. +Ying Wen, Yaodong Yang, Rui Luo, Jun Wang, and Wei Pan. Probabilistic recursive reasoning for multi-agent reinforcement learning. arXiv preprint arXiv:1901.09207, 2019. +Lilian Weng. Llm-powered autonomous agents. *Github*, 2023. URL https://lilianweng.github.io/posts/2023-06-23-agent/. +Martin Weyssow, Aton Kamanda, and Houari A. Sahraoui. Codeultrafeedback: An llm-as-a-judge dataset for aligning large language models to coding preferences. CoRR, abs/2403.09032, 2024. +Sarah Wiegreffe, Ana Marasovic, and Noah A Smith. Measuring association between labels and free-text rationales. arXiv preprint arXiv:2010.12762, 2020. +Sarah Wiegreffe, Jack Hessel, Swabha Swayamdipta, Mark Riedl, and Yejin Choi. Reframing human-AI collaboration for generating free-text explanations. In Marine Carpuat, Marie-Catherine de Marneffe, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 632-658, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.47. URL https://aclanthology.org/2022.naacl-main.47/. +Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8:229-256, 1992. +Yuhuai Wu, Albert Jiang, Wenda Li, Markus Rabe, Charles Staats, Mateja Jamnik, and Christian Szegedy. Autoformalization with large language models. In Neural Information Processing Systems (NeurIPS), 2022. 
+Zhaofeng Wu, Linlu Qiu, Alexis Ross, Ekin Akyurek, Boyuan Chen, Bailin Wang, Najoung Kim, Jacob Andreas, and Yoon Kim. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1819-1862, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.102. URL https://aclanthology.org/2024.naacl-long.102/. +Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024b. +Zijian Wu, Suozhi Huang, Zhejian Zhou, Huaiyuan Ying, Jiayu Wang, Dahua Lin, and Kai Chen. InternLM2.5-StepProver: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024c. + +Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, Rui Zheng, Xiaoran Fan, Xiao Wang, Limao Xiong, Yuhao Zhou, Weiran Wang, Changhao Jiang, Yicheng Zou, Xiangyang Liu, Zhangyue Yin, Shihan Dou, Rongxiang Weng, Wensen Cheng, Qi Zhang, Wenjuan Qin, Yongyan Zheng, Xipeng Qiu, Xuanjing Huang, and Tao Gui. The rise and potential of large language model based agents: A survey. arXiv preprint arXiv:2309.07864, 2023. +Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, Xiao Wang, Rui Zheng, Tao Ji, Xiaowei Shi, Yitao Zhai, Rongxiang Weng, Jingang Wang, Xunliang Cai, Tao Gui, Zuxuan Wu, Qi Zhang, Xipeng Qiu, Xuanjing Huang, and YuGang Jiang. Enhancing llm reasoning via critique models with test-time and training-time supervision, 2024. URL https://arxiv.org/abs/2411.16579. 
+Sang Michael Xie, Aditi Raghunathan, Percy Liang, and Tengyu Ma. An explanation of in-context learning as implicit bayesian inference. In International Conference on Learning Representations, 2022. +Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025. +Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. CoRR, abs/2405.14333, 2024a. doi: 10.48550/ARXIV.2405.14333. URL https://doi.org/10.48550/arXiv.2405.14333. +Huajian Xin, ZZ Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, et al. Deepseek-prover-v1. 5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024b. URL https://arxiv.org/abs/2408.08152. +Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613. +Austin Xu, Srijan Bansal, Yifei Ming, Semih Yavuz, and Shafiq Joty. Does context matter? contextual judgebench for evaluating llm-based judges in contextual settings. arXiv preprint arXiv:2503.15620, 2025a. +Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025b. +Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, Qingwei Lin, and Daxin Jiang. Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a. +Fangzhi Xu, Qika Lin, Jiawei Han, Tianzhe Zhao, Jun Liu, and Erik Cambria. 
Are large language models really good logical reasoners? a comprehensive evaluation and beyond. IEEE Transactions on Knowledge and Data Engineering, 2025c. +Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025d. +Hanwei Xu, Yujun Chen, Yulun Du, Nan Shao, Wang Yanggang, Haiyu Li, and Zhilin Yang. GPS: Genetic prompt search for efficient few-shot learning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 8162-8171, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.559. URL https://aclanthology.org/2022.emnlp-main.559/. + +Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025e. +Kehan Xu, Kun Zhang, Jingyuan Li, Wei Huang, and Yuanzhuo Wang. Crp-rag: A retrieval-augmented generation framework for supporting complex logical reasoning and knowledge planning. _Electronics_, 14 (1):47, 2024b. +Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models. arXiv preprint arXiv:2402.13116, 2024c. +Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. SoftCoT: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025f. +Zhiwei Xu, Yunpeng Bai, Bin Zhang, Dapeng Li, and Guoliang Fan. Haven: Hierarchical cooperative multiagent reinforcement learning with dual coordination mechanism. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 
11735-11743, 2023. +Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S3c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024. +An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671, 2024a. +An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b. +Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024c. URL https://openreview.net/forum?id=Bb4VGOWELI. +Jinghan Yang, Shuming Ma, and Furu Wei. Auto-icl: In-context learning without human supervision. arXiv preprint arXiv:2311.09263, 2023a. URL https://arxiv.org/abs/2311.09263. +Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan Prenger, and Anima Anandkumar. LeanDojo: Theorem proving with retrieval-augmented language models. In Neural Information Processing Systems (NeurIPS), 2023b. +Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024d. +Ruihan Yang, Jiangjie Chen, Yikai Zhang, Siyu Yuan, Aili Chen, Kyle Richardson, Yanghua Xiao, and Deqing Yang. Selfgoal: Your language agents already know how to achieve high-level goals. arXiv preprint arXiv:2406.04784, 2024e. +Zonglin Yang, Li Dong, Xinya Du, Hao Cheng, Erik Cambria, Xiaodong Liu, Jianfeng Gao, and Furu Wei. Language models as inductive reasoners. 
In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 209-225, St. Julian's, Malta, March 2024f. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.13/. +Shunyu Yao and Karthik Narasimhan. Language agents in the digital world: Opportunities and risks. _princeton-nlp.github.io_, Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/. +Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023a. URL https://openreview.net/forum?id=5Xc1ecx01h. + +Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. Re-Act: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR), 2023b. +Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, et al. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv preprint arXiv:2308.02151, 2023c. +Michihiro Yasunaga, Xinyun Chen, Yujia Li, Panupong Pasupat, Jure Leskovec, Percy Liang, Ed H. Chi, and Denny Zhou. Large language models as analogical reasoners. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AgDICX1h50. +He Ye, Matias Martinez, Xiapu Luo, Tao Zhang, and Martin Monperrus. Selfapr: Self-supervised program repair with test execution diagnostics. In Proceedings of the 37th IEEE/ACM International Conference on Automated Software Engineering, pp. 1-13, 2022. URL https://arxiv.org/abs/2203.12755. +Jiacheng Ye, Zhiyong Wu, Jiangtao Feng, Tao Yu, and Lingpeng Kong. 
Compositional exemplars for in-context learning. In International Conference on Machine Learning, pp. 39818-39833. PMLR, 2023a. +Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025. +Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023b. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708. +Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023c. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708. +Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023d. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708. +Ziyi Ye, Xiangsheng Li, Qiuchi Li, Qingyao Ai, Yujia Zhou, Wei Shen, Dong Yan, and Yiqun Liu. Beyond scalar reward model: Learning generative judge from preference data. arXiv preprint arXiv:2410.03742, 2024. +Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 
Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373, 2025. +Shuo Yin, Weihao You, Zhilong Ji, Guoqiang Zhong, and Jinfeng Bai. Mumath-code: Combining tool-use large language models with multi-perspective data augmentation for mathematical reasoning. arXiv preprint arXiv:2405.07551, 2024. +Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Computing Surveys, 56(12):1-39, 2024a. +Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023a. + +Longhui Yu, Weisen Jiang, Han Shi, YU Jincheng, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. MetaMath: Bootstrap your own mathematical questions for large language models. In International Conference on Learning Representations (ICLR), 2024b. +Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024c. +Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025. URL https://arxiv.org/abs/2503.14476. +Zhouliang Yu, Jie Fu, Yao Mu, Chenguang Wang, Lin Shao, and Yaodong Yang. 
Multireact: Multimodal tools augmented reasoning-acting traces for embodied agent planning. 2023b. +Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Wei Ye, Jindong Wang, Xing Xie, Yue Zhang, and Shikun Zhang. Kieval: A knowledge-grounded interactive evaluation framework for large language models. arXiv preprint arXiv:2402.15043, 2024d. +Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Jia Deng, Boji Shan, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. CoRR, abs/2404.02078, 2024a. doi: 10.48550/ARXIV.2404.02078. URL https://doi.org/10.48550/arXiv.2404.02078. +Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels, 2024b. URL https://arxiv.org/abs/2412.01981. +Siyu Yuan, Kaitao Song, Jiangjie Chen, Xu Tan, Dongsheng Li, and Deqing Yang. Evoagent: Towards automatic multi-agent generation via evolutionary algorithms. arXiv preprint arXiv:2406.14228, 2024c. +Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. arXiv preprint arXiv:2401.10020, 2024d. +Zheng Yuan, Hongyi Yuan, Chengpeng Li, Guanting Dong, Chuanqi Tan, and Chang Zhou. Scaling relationship on learning mathematical reasoning with large language models. CoRR, abs/2308.01825, 2023. doi: 10.48550/ARXIV.2308.01825. URL https://doi.org/10.48550/arXiv.2308.01825. +Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: Learning to reason dynamically in LLMs via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tn2mjzjSyR. +Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. 
arXiv preprint arXiv:2309.05653, 2023. +Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. STar: Bootstrapping reasoning with reasoning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=3ELRdg2sgI. +Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024. +Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024a. + +Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. CoRR, abs/2410.02884, 2024b. URL https://doi.org/10.48550/arXiv.2410.02884. +Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, Bingnan Zheng, Bang Liu, Yuyu Luo, and Chenglin Wu. Aflow: Automating agentic workflow generation, 2024c. URL https://arxiv.org/abs/2410.10762. +Jun Zhang, Trey Hedden, and Adrian Chia. Perspective-taking and depth of theory-of-mind reasoning in sequential-move games. Cognitive science, 36(3):560-573, 2012. +Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024d. +Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024e. URL https://openreview.net/forum?id=CxHRoTLmPX. 
+Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024f. +Qizhen Zhang, Chris Lu, Animesh Garg, and Jakob Foerster. Centralized model and exploration policy for multi-agent rl. arXiv preprint arXiv:2107.06434, 2021. +Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. Finagent: A multimodal foundation agent for financial trading: Tool-augmented, diversified, and generalist. arXiv preprint arXiv:2402.18485, 2024g. +Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. CoRR, abs/2406.09136, 2024h. doi: 10.48550/ARXIV.2406.09136. URL https://doi.org/10.48550/arXiv.2406.09136. +Xuanliang Zhang, Dingzirui Wang, Longxu Dou, Qingfu Zhu, and Wanxiang Che. A survey of table reasoning with large language models. Frontiers of Computer Science, 19(9):199348, 2025a. +Yufeng Zhang, Fengzhuo Zhang, Zhuoran Yang, and Zhaoran Wang. What and how does in-context learning learn? bayesian model averaging, parameterization, and generalization. arXiv preprint arXiv:2305.19420, 2023. +Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 15637-15653, Bangkok, Thailand, August 2024i. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.924. URL https://aclanthology.org/2024-findings-acl.924/. +Zhehao Zhang, Yan Gao, and Jian-Guang Lou. $e^5$ : Zero-shot hierarchical table analysis using augmented LLMs via explain, extract, execute, exhibit and extrapolate. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1244-1258, Mexico City, Mexico, June 2024j. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.68. URL https://aclanthology.org/2024.naacl-long.68/. +Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025b. +Ruochen Zhao, Xingxuan Li, Shafiq Joty, Chengwei Qin, and Lidong Bing. Verify-and-edit: A knowledge-enhanced chain-of-thought framework. arXiv preprint arXiv:2305.03268, 2023. + +Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. CoRR, abs/2412.06559, 2024. URL https://arxiv.org/abs/2412.06559. +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llmas-a-judge with mt-bench and chatbot arena. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023a. URL http://papers.nips.cc/paper_files/paper/2023/bit/91f18a1287b398d378ef22505bf41832-AAbstract-Datasets_and_Benchmarks.html. +Rui Zheng, Shihan Dou, Songyang Gao, Yuan Hua, Wei Shen, Binghai Wang, Yan Liu, Senjie Jin, Qin Liu, Yuhao Zhou, et al. Secrets of rlhf in large language models part i: Ppo. arXiv preprint arXiv:2307.04964, 2023b. 
+Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 62138-62160. PMLR, 21-27 Jul 2024a. +Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgfM. +Han Zhou, Xingchen Wan, Ruoxi Sun, Hamid Palangi, Shariq Iqbal, Ivan Vulic, Anna Korhonen, and Sercan Ö. Ark. Multi-agent design: Optimizing agents with better prompts and topologies, 2025a. URL https://arxiv.org/abs/2502.02533. +Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=BR0vXhmzYK. +Yilun Zhou, Austin Xu, Peifeng Wang, Caiming Xiong, and Shafiq Joty. Evaluating judges as evaluators: The jetst's benchmark of llm-as-judges as test-time scaling evaluators. arXiv preprint arXiv:2504.15253, 2025b. +Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large language models are human-level prompt engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-. +Yuxiang Zhou, Jiazheng Li, Yanzheng Xiang, Hanqi Yan, Lin Gui, and Yulan He. The mystery of in-context learning: A comprehensive survey on interpretation and analysis. 
In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 14365-14378, 2024c. +Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024a. +Ying Zhu, Shengchang Li, Ziqian Kong, and Peilan Xu. Graph retrieval augmented trustworthiness reasoning. arXiv preprint arXiv:2408.12333, 2024b. +Mingchen Zhuge, Wenyi Wang, Louis Kirsch, Francesco Faccio, Dmitrii Khizbullin, and Jürgen Schmidhuber. Language agents as estimizable graphs, 2024. URL https://arxiv.org/abs/2402.16823. + +Jingming Zhuo, Songyang Zhang, Xinyu Fang, Haodong Duan, Dahua Lin, and Kai Chen. ProSA: Assessing and understanding the prompt sensitivity of LLMs. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 1950-1976, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.108. URL https://aclanthology.org/2024 findings-emnlp.108/. +Daniel M. Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B. Brown, Alec Radford, Dario Amodei, Paul F. Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. CoRR, abs/1909.08593, 2019. URL http://arxiv.org/abs/1909.08593. +Kaijian Zou, Muhammad Khalifa, and Lu Wang. Retrieval or global context understanding? on many-shot in-context learning for long-context evaluation. arXiv preprint arXiv:2411.07130, 2024. 
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09037/images/0393d3443e276749833bf066a6b2bb5413d444ecafbcb3ac62400658a0872798.jpg b/data/2025/2504_09xxx/2504.09037/images/0393d3443e276749833bf066a6b2bb5413d444ecafbcb3ac62400658a0872798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc16a929d3e737169c3affd859f644dbfd1b8966 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/0393d3443e276749833bf066a6b2bb5413d444ecafbcb3ac62400658a0872798.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd246307670f623eaea2cef8cd506a47701c766faf56328bb79f269b6b6cbbfa +size 8364 diff --git a/data/2025/2504_09xxx/2504.09037/images/1e801304742ff47f8641315d48562cda363d39fe3b0246405be5a97501fa27ce.jpg b/data/2025/2504_09xxx/2504.09037/images/1e801304742ff47f8641315d48562cda363d39fe3b0246405be5a97501fa27ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24fb055245c5d706e2612049162ba7747705d6b3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/1e801304742ff47f8641315d48562cda363d39fe3b0246405be5a97501fa27ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b96cea4dddb6521a64a507619d0955917bff47b4799d21c4dbdede156c102399 +size 20952 diff --git a/data/2025/2504_09xxx/2504.09037/images/26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg b/data/2025/2504_09xxx/2504.09037/images/26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..daf82ea2aa4855ab99c6a326975dd7093b415625 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d3882d7d8e545a01e83f4c5f56fd05829f9107ec368ff941d7cb0c16799c10b +size 42517 diff --git 
a/data/2025/2504_09xxx/2504.09037/images/39de4b7e43614dd9fd6b20b5ea17613653be0964114d78d15e240a8be4b40062.jpg b/data/2025/2504_09xxx/2504.09037/images/39de4b7e43614dd9fd6b20b5ea17613653be0964114d78d15e240a8be4b40062.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b3ffdf738718d5a04191dbbd10b0f2b18999c53 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/39de4b7e43614dd9fd6b20b5ea17613653be0964114d78d15e240a8be4b40062.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:768aafd2d701a4bd998db470027bbe4648a1995d73b61e5fdd8ed085796ae0f4 +size 8592 diff --git a/data/2025/2504_09xxx/2504.09037/images/3df501760c731b5b3866e2f732d8ca9368b17b759d192490f7d38cbc2fd05fd1.jpg b/data/2025/2504_09xxx/2504.09037/images/3df501760c731b5b3866e2f732d8ca9368b17b759d192490f7d38cbc2fd05fd1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a4790faff0de131bc2f37a4e19aa8e73c8c887d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/3df501760c731b5b3866e2f732d8ca9368b17b759d192490f7d38cbc2fd05fd1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fca6970e9deeea6dd754429c52ff2dd3996c8b1201074c44d6cfd334857f3b63 +size 52591 diff --git a/data/2025/2504_09xxx/2504.09037/images/408f8e9fa6d813d4f1b1c775fc2d71b99bc1df299e3b8e20cfde96688a1b7412.jpg b/data/2025/2504_09xxx/2504.09037/images/408f8e9fa6d813d4f1b1c775fc2d71b99bc1df299e3b8e20cfde96688a1b7412.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73932264683e619b587c5f84fa4d99756ac3fae2 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/408f8e9fa6d813d4f1b1c775fc2d71b99bc1df299e3b8e20cfde96688a1b7412.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61f15e05ad51c89f64d707591c7b9cca73c22a17644e19cd4149b034801e5510 +size 12301 diff --git a/data/2025/2504_09xxx/2504.09037/images/42f2a813011d2945cd7d0f59d91cc7b3579bb147c2de1a3cb07f649bb422c1b6.jpg 
b/data/2025/2504_09xxx/2504.09037/images/42f2a813011d2945cd7d0f59d91cc7b3579bb147c2de1a3cb07f649bb422c1b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f5dbac664606febe31e598a97fb01da8665b455 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/42f2a813011d2945cd7d0f59d91cc7b3579bb147c2de1a3cb07f649bb422c1b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1022345f4ef12da9cd07ab9d454d0e01cfa11e054eb65cb910a49f108e071e3 +size 5650 diff --git a/data/2025/2504_09xxx/2504.09037/images/43a26766ab3cc5994e2bc151322aa147605a9f885631b6df81f8c6dd61cc72f8.jpg b/data/2025/2504_09xxx/2504.09037/images/43a26766ab3cc5994e2bc151322aa147605a9f885631b6df81f8c6dd61cc72f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c00f4bab7b51d8db1e790b31821ada5cd467e2b0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/43a26766ab3cc5994e2bc151322aa147605a9f885631b6df81f8c6dd61cc72f8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9620ca72d37fbe9b06aea5c71b2a1605e127520666c23f685e0f6a57971931a1 +size 7747 diff --git a/data/2025/2504_09xxx/2504.09037/images/4aa0c61186f0c779a1bd0502eb36815676a9150805a28fd149dc247fb353c742.jpg b/data/2025/2504_09xxx/2504.09037/images/4aa0c61186f0c779a1bd0502eb36815676a9150805a28fd149dc247fb353c742.jpg new file mode 100644 index 0000000000000000000000000000000000000000..118c91065da79cfa62ce66cce2000efe40bb009c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/4aa0c61186f0c779a1bd0502eb36815676a9150805a28fd149dc247fb353c742.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cc53513caf795edb89dddd723ebbe7408fcfdc524a30a5957fef95cf6aed2b0 +size 8432 diff --git a/data/2025/2504_09xxx/2504.09037/images/60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg b/data/2025/2504_09xxx/2504.09037/images/60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d74bb1a34cb95b00d252fadf788ce36f56c7b925 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96721ef7df1cc041309a4b90d2f13edf815641838efe3d8f03102a92546c0ad4 +size 55434 diff --git a/data/2025/2504_09xxx/2504.09037/images/6468e2dd5f73b620b8760b6d78b4044d48515269db4a51d44ab8543841c582c9.jpg b/data/2025/2504_09xxx/2504.09037/images/6468e2dd5f73b620b8760b6d78b4044d48515269db4a51d44ab8543841c582c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d248bd39a71f04bcf0951ee4bba299041b6cc193 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/6468e2dd5f73b620b8760b6d78b4044d48515269db4a51d44ab8543841c582c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c765ecfff62f2d1c8e4497612b3eb7f0430a6ae6c23e7b7a9126452e45c20276 +size 67306 diff --git a/data/2025/2504_09xxx/2504.09037/images/67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg b/data/2025/2504_09xxx/2504.09037/images/67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6cb89fca6985457c76a486e09b293d6a8a008d89 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbbda1f2d3f1ad8488309fc7bcb62949c47e841bfdaebe8b998c1bd38ac12521 +size 377011 diff --git a/data/2025/2504_09xxx/2504.09037/images/6d53eb6e4bfec2bd5f35a0eaeafe8115a0a508071d9e7276eeddfecbecd8c818.jpg b/data/2025/2504_09xxx/2504.09037/images/6d53eb6e4bfec2bd5f35a0eaeafe8115a0a508071d9e7276eeddfecbecd8c818.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec419763e7cf443c706fa537e9b7177fc86c803e --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09037/images/6d53eb6e4bfec2bd5f35a0eaeafe8115a0a508071d9e7276eeddfecbecd8c818.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0beb1c5045c2cce8d34ecd6d47436645d71ca948362bb4b571379387cabcb48 +size 17134 diff --git a/data/2025/2504_09xxx/2504.09037/images/90bdd37917ff2f30ff386b7e3ffe255d3de4c023028d7e66579d66c8d88f6aae.jpg b/data/2025/2504_09xxx/2504.09037/images/90bdd37917ff2f30ff386b7e3ffe255d3de4c023028d7e66579d66c8d88f6aae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7fe3b4359783a332dbccda6bda4b1e55063dccbc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/90bdd37917ff2f30ff386b7e3ffe255d3de4c023028d7e66579d66c8d88f6aae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01ecd37c0bc87456e59bff5e6efd7d7bab0300acb35488017d26e6f488d8f76 +size 8757 diff --git a/data/2025/2504_09xxx/2504.09037/images/9a72af506464d7266485c42711ec30e3529535cc0cb4bc99428f137ea9d892be.jpg b/data/2025/2504_09xxx/2504.09037/images/9a72af506464d7266485c42711ec30e3529535cc0cb4bc99428f137ea9d892be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..999389bea9e8441fd324fe5a2629be1a06a08296 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/9a72af506464d7266485c42711ec30e3529535cc0cb4bc99428f137ea9d892be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b92c7ef9220ef4c02cd3bfd4ddb9ecdef7e004f0a41ec1bfcbea2f14ea8c1271 +size 71317 diff --git a/data/2025/2504_09xxx/2504.09037/images/9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg b/data/2025/2504_09xxx/2504.09037/images/9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc34efce709957c93e9af3f7f89964d606f0d69e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:38dc02be0f6ed09773af470da61491ad89f131b9613ef2887baa941f6e73f14d +size 37230 diff --git a/data/2025/2504_09xxx/2504.09037/images/a33ad402bcef5028d88de2930c410286783822bc4e894664f1cd2dfc4b19da46.jpg b/data/2025/2504_09xxx/2504.09037/images/a33ad402bcef5028d88de2930c410286783822bc4e894664f1cd2dfc4b19da46.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5944f8126a0a056eb38c0cde96fcb297c7bc6fa0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/a33ad402bcef5028d88de2930c410286783822bc4e894664f1cd2dfc4b19da46.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d8231be2ed7a707c0fe6f0a5add591b3122c21cd8f0f0793455df6708bb45a5 +size 12903 diff --git a/data/2025/2504_09xxx/2504.09037/images/a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg b/data/2025/2504_09xxx/2504.09037/images/a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b72248d4caed46823290993fe1e04dfaf6bb938 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dc6c5e4c76b3a7aa359c7873e5f708c9096166f9b51e1930ebbffed9e8fdb32 +size 32403 diff --git a/data/2025/2504_09xxx/2504.09037/images/acc497e70ddf56ff5155272a2c39df1404b99cdd1fd1aff432306a10918635ad.jpg b/data/2025/2504_09xxx/2504.09037/images/acc497e70ddf56ff5155272a2c39df1404b99cdd1fd1aff432306a10918635ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..67cce6a0d09ae21f3ef42333f95d208ab100e19c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/acc497e70ddf56ff5155272a2c39df1404b99cdd1fd1aff432306a10918635ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1553f4b902cca1e20300a981aa98f1b6681e5adba059546bbbd9f05bfb75e761 +size 47612 diff --git 
a/data/2025/2504_09xxx/2504.09037/images/b56b161b40385ab764234fe1b8b74a6302e624a8559602cb7e9c4810b1a0ed83.jpg b/data/2025/2504_09xxx/2504.09037/images/b56b161b40385ab764234fe1b8b74a6302e624a8559602cb7e9c4810b1a0ed83.jpg new file mode 100644 index 0000000000000000000000000000000000000000..769ee04056f23c4c6986deacae2ad0985473a2f6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/b56b161b40385ab764234fe1b8b74a6302e624a8559602cb7e9c4810b1a0ed83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02e61212e02dbd8144263668d820ea6b65be67f774b67f84811891482b0fffa8 +size 31397 diff --git a/data/2025/2504_09xxx/2504.09037/images/b7ccdc1523c5cda97395d8e1c071124159cd2ad9b45eecf6b0cf01c435760963.jpg b/data/2025/2504_09xxx/2504.09037/images/b7ccdc1523c5cda97395d8e1c071124159cd2ad9b45eecf6b0cf01c435760963.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e394357dbe5a81e30db16c957ef1369b807bbf97 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/b7ccdc1523c5cda97395d8e1c071124159cd2ad9b45eecf6b0cf01c435760963.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8046b6bc3e28c07470a73a06452bf6fc3b55f14708f5568638b8cf1eb3a3fda +size 10758 diff --git a/data/2025/2504_09xxx/2504.09037/images/b9528171bce4bc3a7e670aa15ae19f22637e7db2734a43deaefcc1e7b1fdde5e.jpg b/data/2025/2504_09xxx/2504.09037/images/b9528171bce4bc3a7e670aa15ae19f22637e7db2734a43deaefcc1e7b1fdde5e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5dadc18e0c8a0113d3beaa1ff237d41cdc79326 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/b9528171bce4bc3a7e670aa15ae19f22637e7db2734a43deaefcc1e7b1fdde5e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20c0235db72d34b0f7c4e62e0d61bcd469010b025afc25afbdada9fc4c17be02 +size 6028 diff --git a/data/2025/2504_09xxx/2504.09037/images/ccf703a6c16491ff1de1e5cf6ff471fa0b06b107d09e10325da106c385ddafd4.jpg 
b/data/2025/2504_09xxx/2504.09037/images/ccf703a6c16491ff1de1e5cf6ff471fa0b06b107d09e10325da106c385ddafd4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15d9f9d0afddc654f4c81abb9df92d6b4b9d87f2 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/ccf703a6c16491ff1de1e5cf6ff471fa0b06b107d09e10325da106c385ddafd4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:983bfaf360b19568cc5cc60dc1714739c17b55b47efcf8e8051c5d263e4beb9b +size 72834 diff --git a/data/2025/2504_09xxx/2504.09037/images/d2d3ae2651fba2b47af954ed5cd41fbafa1fee7fb129a2b951985ad6c1721ac0.jpg b/data/2025/2504_09xxx/2504.09037/images/d2d3ae2651fba2b47af954ed5cd41fbafa1fee7fb129a2b951985ad6c1721ac0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e570b65c36ae989e0662a8765456a1aa2447faf --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/d2d3ae2651fba2b47af954ed5cd41fbafa1fee7fb129a2b951985ad6c1721ac0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ebf15357410c524786a7771a50e5d60139409d9d8b85a8a86bd18742be8ad9e +size 78781 diff --git a/data/2025/2504_09xxx/2504.09037/images/dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg b/data/2025/2504_09xxx/2504.09037/images/dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a46446bbc22e5ab1eb09b27c7749fe23da33d39c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af6927f9eb2954aa744905ec663f45de36ed8b109190e932b4a01102030b83e8 +size 22779 diff --git a/data/2025/2504_09xxx/2504.09037/images/e8050e02c74b6b7f46934188f83a40262bec0f41217b4b9bef7d184af8a97f4e.jpg b/data/2025/2504_09xxx/2504.09037/images/e8050e02c74b6b7f46934188f83a40262bec0f41217b4b9bef7d184af8a97f4e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0056c52ee3366dab2c759cc2cb9c33b2fa00af21 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/e8050e02c74b6b7f46934188f83a40262bec0f41217b4b9bef7d184af8a97f4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:519d6d7f7ea4bba64071773a843f3748f90010bd3279082b2c92420d8f2d6677 +size 7944 diff --git a/data/2025/2504_09xxx/2504.09037/images/f29e00273e605da0f7119a68c2e6571b38f75b90addfb3732aebec1cebca67eb.jpg b/data/2025/2504_09xxx/2504.09037/images/f29e00273e605da0f7119a68c2e6571b38f75b90addfb3732aebec1cebca67eb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71423863c44a5168a290b59a9a807b497d1d297a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/f29e00273e605da0f7119a68c2e6571b38f75b90addfb3732aebec1cebca67eb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:645181eb19bcca34669ed96a9de5688d9b9371daf159275463042bfa31989581 +size 85475 diff --git a/data/2025/2504_09xxx/2504.09037/images/ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg b/data/2025/2504_09xxx/2504.09037/images/ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2697690b6447a9edce9f64b6ade2280c6eda7e1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/images/ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe1ec48a9c837e48b357a016f6d6fe57cc9eaf30ebbcbe94a430fd30f72d1d72 +size 109107 diff --git a/data/2025/2504_09xxx/2504.09037/layout.json b/data/2025/2504_09xxx/2504.09037/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0b72e7c75462fe377795c74bc24876ab0569b9c0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09037/layout.json @@ -0,0 +1,34397 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 78, + 539, + 120 + ], + "type": "title", 
+ "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 78, + 539, + 120 + ], + "spans": [ + { + "bbox": [ + 69, + 78, + 539, + 120 + ], + "type": "text", + "content": "A Survey of Frontiers in LLM Reasoning: Inference Scaling, Learning to Reason, and Agentic Systems" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 144, + 129, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 144, + 129, + 156 + ], + "spans": [ + { + "bbox": [ + 69, + 144, + 129, + 156 + ], + "type": "text", + "content": "Zixuan Ke\\*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 161, + 145, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 161, + 145, + 174 + ], + "spans": [ + { + "bbox": [ + 70, + 161, + 145, + 174 + ], + "type": "text", + "content": "Fangkai Jiao" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 178, + 129, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 178, + 129, + 191 + ], + "spans": [ + { + "bbox": [ + 70, + 178, + 129, + 191 + ], + "type": "text", + "content": "Yifei Ming*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 196, + 166, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 196, + 166, + 208 + ], + "spans": [ + { + "bbox": [ + 70, + 196, + 166, + 208 + ], + "type": "text", + "content": "Xuan-Phi Nguyen*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 213, + 128, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 128, + 223 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 128, + 223 + ], + "type": "text", + "content": "Austin Xu\\*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 228, + 156, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 228, + 156, + 241 + ], + "spans": [ + { + "bbox": [ + 70, + 228, + 156, + 241 + ], + "type": "text", + "content": "Do Xuan Long†,‡" + } + ] + } + ], + "index": 7 + }, + { + "bbox": 
[ + 70, + 246, + 129, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 246, + 129, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 246, + 129, + 257 + ], + "type": "text", + "content": "Minzhi Li† ‡" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 263, + 146, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 263, + 146, + 275 + ], + "spans": [ + { + "bbox": [ + 70, + 263, + 146, + 275 + ], + "type": "text", + "content": "Chengwei Qin" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 281, + 145, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 281, + 145, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 281, + 145, + 293 + ], + "type": "text", + "content": "Peifeng Wang*" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 297, + 150, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 297, + 150, + 308 + ], + "spans": [ + { + "bbox": [ + 70, + 297, + 150, + 308 + ], + "type": "text", + "content": "Silvio Savarese*" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 314, + 151, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 314, + 151, + 326 + ], + "spans": [ + { + "bbox": [ + 70, + 314, + 151, + 326 + ], + "type": "text", + "content": "Caiming Xiong*" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 331, + 139, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 139, + 343 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 139, + 343 + ], + "type": "text", + "content": "Shafiq Joty\\*," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 353, + 213, + 375 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 69, + 353, + 168, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 353, + 168, + 364 + ], + "spans": [ + { + "bbox": [ + 69, + 353, + 168, + 364 + ], + "type": 
"text", + "content": "\\*Salesforce AI Research" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 365, + 213, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 365, + 213, + 375 + ], + "spans": [ + { + "bbox": [ + 70, + 365, + 213, + 375 + ], + "type": "text", + "content": "Nanyang Technological University" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 231, + 353, + 370, + 375 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 231, + 353, + 370, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 353, + 370, + 365 + ], + "spans": [ + { + "bbox": [ + 231, + 353, + 370, + 365 + ], + "type": "text", + "content": "† National University of Singapore" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "spans": [ + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "type": "inline_equation", + "content": "^\\ddagger I^2 R" + }, + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "type": "inline_equation", + "content": "A^{*}STAR" + }, + { + "bbox": [ + 232, + 365, + 341, + 375 + ], + "type": "text", + "content": ", Singapore" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 438, + 146, + 541, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 146, + 541, + 156 + ], + "spans": [ + { + "bbox": [ + 438, + 146, + 541, + 156 + ], + "type": "text", + "content": "zixuan ke@salesforce.com" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 438, + 163, + 541, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 163, + 541, + 174 + ], + "spans": [ + { + "bbox": [ + 438, + 163, + 541, + 174 + ], + "type": "text", + "content": 
"jiaofangkai@hotmail.com" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 435, + 180, + 540, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 435, + 180, + 540, + 191 + ], + "spans": [ + { + "bbox": [ + 435, + 180, + 540, + 191 + ], + "type": "text", + "content": "yifei.ming@salesforce.com" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 443, + 198, + 540, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 198, + 540, + 207 + ], + "spans": [ + { + "bbox": [ + 443, + 198, + 540, + 207 + ], + "type": "text", + "content": "xnguyen@salesforce.com" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 439, + 214, + 540, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 439, + 214, + 540, + 224 + ], + "spans": [ + { + "bbox": [ + 439, + 214, + 540, + 224 + ], + "type": "text", + "content": "austin.xu@salesforce.com" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 446, + 231, + 540, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 446, + 231, + 540, + 241 + ], + "spans": [ + { + "bbox": [ + 446, + 231, + 540, + 241 + ], + "type": "text", + "content": "xuanlong.do@u.nus.edu" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 459, + 247, + 540, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 459, + 247, + 540, + 257 + ], + "spans": [ + { + "bbox": [ + 459, + 247, + 540, + 257 + ], + "type": "text", + "content": "li.minzhi@u.nus.edu" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 435, + 265, + 540, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 435, + 265, + 540, + 275 + ], + "spans": [ + { + "bbox": [ + 435, + 265, + 540, + 275 + ], + "type": "text", + "content": "chengwei003@e.ntu.edu.sg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 424, + 281, + 540, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 281, + 540, + 293 + ], + "spans": [ + { + "bbox": [ 
+ 424, + 281, + 540, + 293 + ], + "type": "text", + "content": "peifeng.wang@salesforce.com" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 438, + 299, + 540, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 438, + 299, + 540, + 308 + ], + "spans": [ + { + "bbox": [ + 438, + 299, + 540, + 308 + ], + "type": "text", + "content": "ssavarese@salesforce.com" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 450, + 316, + 540, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 450, + 316, + 540, + 326 + ], + "spans": [ + { + "bbox": [ + 450, + 316, + 540, + 326 + ], + "type": "text", + "content": "cxiong@salesforce.com" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 456, + 332, + 540, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 456, + 332, + 540, + 342 + ], + "spans": [ + { + "bbox": [ + 456, + 332, + 540, + 342 + ], + "type": "text", + "content": "sjoty@salesforce.com" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 280, + 424, + 330, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 424, + 330, + 437 + ], + "spans": [ + { + "bbox": [ + 280, + 424, + 330, + 437 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 104, + 457, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 506, + 721 + ], + "type": "text", + "content": "Reasoning is a fundamental cognitive process that enables logical inference, problem-solving, and decision-making. With the rapid advancement of large language models (LLMs), reasoning has emerged as a key capability that distinguishes advanced AI systems from conventional models that empower chatbots. 
In this survey, we categorize existing methods along two orthogonal dimensions: (1) Regimes, which define the stage at which reasoning is achieved (either at inference time or through dedicated training); and (2) Architectures, which determine the components involved in the reasoning process, distinguishing between standalone LLMs and agentic compound systems that incorporate external tools, and multiagent collaborations. Within each dimension, we analyze two key perspectives: (1) Input level, which focuses on techniques that construct high-quality prompts that the LLM condition on; and (2) Output level, which methods that refine multiple sampled candidates to enhance reasoning quality. This categorization provides a systematic understanding of the evolving landscape of LLM reasoning, highlighting emerging trends such as the shift from inference-scaling to learning-to-reason (e.g., DeepSeek-R1), and the transition to agentic workflows (e.g., OpenAI Deep Research, Manus Agent). Additionally, we cover a broad spectrum of learning algorithms, from supervised fine-tuning to reinforcement learning such as PPO and GRPO, and the training of reasoners and verifiers. We also examine key designs of agentic workflows, from established patterns like generator-evaluator and LLM debate to recent innovations. Finally, we identify emerging trends, such as domain-specific reasoning systems, and open challenges, such as evaluation and data quality. This survey aims to provide AI researchers and practitioners with a comprehensive foundation for advancing reasoning in LLMs, paving the way for more sophisticated and reliable AI systems." 
+ } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 37, + 563 + ], + "type": "text", + "content": "arXiv:2504.09037v3 [cs.AI] 5 Aug 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 120, + 82, + 315, + 259 + ], + "blocks": [ + { + "bbox": [ + 120, + 82, + 315, + 259 + ], + "lines": [ + { + "bbox": [ + 120, + 82, + 315, + 259 + ], + "spans": [ + { + "bbox": [ + 120, + 82, + 315, + 259 + ], + "type": "image", + "image_path": "a76aac3ee69b5e7e56531440627ceca98535c9e7ba6066bdb1b715823ec14a60.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 270, + 541, + 331 + ], + "lines": [ + { + "bbox": [ + 67, + 270, + 541, + 331 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 541, + 331 + ], + "type": "text", + "content": "Figure 1: The LLM reasoning surge. We show the cumulative number (in thousands) of papers published from 2022 to 2/2025, based on Semantic Scholar keyword search. Research on reasoning regimes and agent architectures has accelerated notably since the introduction of Chain-of-Thought (CoT) in 2022. This growth is further influenced by other major developments, such as the release of ChatGPT (Ouyang et al., 2022) in 9/2022, and popularity of in-context learning (Brown et al., 2020) as an inference-time optimization method." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 309, + 82, + 487, + 259 + ], + "blocks": [ + { + "bbox": [ + 309, + 82, + 487, + 259 + ], + "lines": [ + { + "bbox": [ + 309, + 82, + 487, + 259 + ], + "spans": [ + { + "bbox": [ + 309, + 82, + 487, + 259 + ], + "type": "image", + "image_path": "dc2db5b5890553285102598a559db40fef121a42cef38c9a947e2f9dd3832578.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 356, + 160, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 356, + 160, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 356, + 160, + 369 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 385, + 541, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 385, + 541, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 385, + 541, + 458 + ], + "type": "text", + "content": "Reasoning is the cognitive process of analyzing evidence, constructing arguments, and applying logic to form conclusions or make informed judgments. It is essential to many intellectual pursuits, including decision-making, problem-solving, and critical thinking. The study of reasoning spans multiple disciplines—philosophy (Passmore, 1961), psychology (Wason & JohnsonLaird, 1972), and computer science (Huth & Ryan, 2004)—as it provides insights into how individuals interpret information, evaluate alternatives, and develop sound conclusions using logic." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 463, + 541, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 463, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 67, + 463, + 541, + 594 + ], + "type": "text", + "content": "Recently, large language models (LLMs) have demonstrated a range of emerging abilities, such as in-context learning (Dong et al., 2024), role playing (Shanahan et al., 2023b) and domain adaptation (Ke et al., 2023; 2025a; Ke & Liu, 2023) as they scale, with reasoning becoming one of the most critical capabilities. As shown in Figure 1, this area has rapidly gained research attention, often referred to as LLM reasoning or reasoning language model (RLM) (Besta et al., 2025). The increasing focus on this topic is understandable, as reasoning capability is: (i) Challenging, requiring multi-step processing beyond the token-by-token generative nature of auto-regressive LLMs; (ii) Fundamental, as it is a core aspect of intelligence, particularly in planning and strategic decision-making; and, most importantly, (iii) Promising, as recent advances in LLMs hint at a viable path forward. Given these factors, reasoning is widely regarded as a prerequisite for more advanced AI systems approaching Artificial General Intelligence (AGI), beyond the conventional AI that aims to closely follow instruction (Duenas & Ruiz, 2024)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "content": "Reasoning requires LLMs to go beyond directly producing an answer from a question; instead, they must generate the thinking process (implicitly or explicitly) in the form of 'question " + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "content": " reasoning steps " + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "content": " answer'. It has been shown that scaling pre-training may not be the optimal solution for improving reasoning (Snell et al., 2025; OpenAI, 2025). Instead, one popular approach to achieve this is the well-known chain-of-thought (CoT) prompting (Wei et al., 2022b), which demonstrates that by modifying the prompt (e.g., 'Let us think step by step') or in-context samples, LLMs can elicit a step-by-step reasoning process at test time without additional training. Such intuitive prompting techniques have been shown to substantially improve LLMs' reasoning accuracy (Wei et al., 2022b). Building on this, the ability of LLMs to reason effectively depends on two factors: how and at what stage reasoning is achieved, and what components are involved in the reasoning process. Accordingly, in this survey, we categorize existing research into two orthogonal dimensions: (1) Regime, refers to whether reasoning is achieved through inference-time strategies (aka. 
inference-time" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "type": "text", + "content": "scaling) or through direct learning and adaptation (learning to reason); and (2) Architecture, refers to whether reasoning happens within a single, standalone LLM or within an interactive, agentic system." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 112, + 541, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 112, + 541, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 541, + 209 + ], + "type": "text", + "content": "These two dimensions are orthogonal, meaning different regimes can be applied to the same architecture, and different architectures can operate under the same regime. The intersection of these dimensions allows for a more comprehensive and systematic organization of reasoning techniques, encompassing most approaches studied to date while highlighting key trends, such as the shift from inference scaling to learning-to-reason and from standalone LLMs to agentic systems. Notably, most prior surveys have focused on only one or two of these dimensions, typically inference scaling and standalone LLMs, rarely considering both together (see detailed comparison later). By introducing this categorization, we aim to provide a structured perspective that clarifies the diverse landscape of LLM reasoning and establishes a foundation for future research." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 221, + 187, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 221, + 187, + 235 + ], + "spans": [ + { + "bbox": [ + 69, + 221, + 187, + 235 + ], + "type": "text", + "content": "1.1 Reasoning Regimes" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 243, + 541, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 541, + 316 + ], + "type": "text", + "content": "Inference scaling CoT prompting demonstrates the potential to scale inference-time (test-time) reasoning. It has also been shown that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it improves generalization through enhanced flexibility in prompt and workflow design. Building on this, inference scaling techniques have emerged, allowing additional test-time computation before generating an answer. The key idea is that instead of updating the LLM itself, these methods aim to select the best trajectories to improve reasoning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 541, + 406 + ], + "type": "text", + "content": "Several variants of prompting methods (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022) have been introduced, providing structured prompts to enhance reasoning. Additionally, inference scaling optimizes reasoning through search and planning (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023; Suzgun & Kalai, 2024a). One key challenge in search and planning is evaluating the quality of candidate solutions. However, evaluating reasoning quality is inherently difficult, even for humans. 
Existing approaches can be categorized based on whether they judge the final outcome, i.e., outcome reward models (ORMs) (Hendrycks et al., 2021b), or the reasoning process, i.e., process reward models (PRMs) (Lightman et al., 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 411, + 541, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 541, + 447 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 541, + 447 + ], + "type": "text", + "content": "One of the most notable milestones in this direction is OpenAI's o1 (09/2024) (OpenAI et al., 2024), which demonstrate the effectiveness of inference-time scaling in complex tasks like mathematics, coding and scientific problem-solving:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 462, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 506, + 510 + ], + "type": "text", + "content": "\"We have found that the performance of o1 consistently improves with more reinforcement learning (train-time compute) and with more time spent thinking (test-time compute). The constraints on scaling this approach differ substantially from those of LLM pretraining, and we are continuing to investigate them.\" — OpenAI o1 release blog" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 525, + 541, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 525, + 541, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 525, + 541, + 682 + ], + "type": "text", + "content": "Learning-to-reason Another approach to unleash the deliberate thinking is updating the LLM through training. Unlike inference scaling, learning-to-reason aims to enhance reasoning capabilities through dedicated training, reducing reliance on costly inference-time computations. 
However, a key challenge in this regime is the scarcity of training data, as step-by-step human-annotated reasoning trajectories are prohibitively expensive to collect. To address this, research has focused on automatically generating such trajectories and developing effective training strategies to leverage them. For example, supervised fine-tuning with long CoT (Muennighoff et al., 2025) or preference learning with reasoning preference data, with DPO (Rafailov et al., 2023) as a representative approach. More recent approaches even bypass reasoning annotation by using reinforcement learning (RL), with recent work like GRPO (Shao et al., 2024) demonstrating remarkable success in this direction. A significant milestone in this direction is DeepSeek-R1 (01/2025) (DeepSeek-AI et al., 2025), an open-source model that achieves performance comparable to OpenAI's o1 while requiring far fewer computational resources. It further reveals that RL alone is possible to learn the sophisticated behaviors just as the test-time computation increase:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 696, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 696, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 696, + 506, + 734 + ], + "type": "text", + "content": "\"One of the most remarkable aspects of this self-evolution is the emergence of sophisticated behaviors as the test-time computation increases. 
Behaviors such as reflection—where the model revisits and reevaluates its previous steps—and the exploration of alternative ap-" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 79, + 541, + 222 + ], + "blocks": [ + { + "bbox": [ + 70, + 79, + 541, + 222 + ], + "lines": [ + { + "bbox": [ + 70, + 79, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 70, + 79, + 541, + 222 + ], + "type": "image", + "image_path": "60f990a388a33bd4469511e2fcb952ddf14d2a5717e7f7f12189e6a1dacfb1bf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 232, + 537, + 245 + ], + "lines": [ + { + "bbox": [ + 72, + 232, + 537, + 245 + ], + "spans": [ + { + "bbox": [ + 72, + 232, + 537, + 245 + ], + "type": "text", + "content": "Figure 2: The proposed categorization over regimes, architectures, and unified perspectives in this survey." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 272, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 506, + 308 + ], + "type": "text", + "content": "proaches to problem-solving arise spontaneously. 
These behaviors are not explicitly programmed but instead emerge as a result of the model's interaction with the reinforcement learning environment.\" — DeepSeek-R1 'Aha moment'" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 331, + 241, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 331, + 241, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 331, + 241, + 342 + ], + "type": "text", + "content": "1.2 Reasoning System Architecture" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 355, + 541, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 355, + 541, + 416 + ], + "spans": [ + { + "bbox": [ + 67, + 355, + 541, + 416 + ], + "type": "text", + "content": "Standalone LLM and agentic systems Orthogonal to the regimes, studies have explored architectural advancements in LLM reasoning, moving beyond next-token prediction in standalone models to embrace agentic systems—AI systems that exhibit interactivity and autonomy to refine reasoning and decision-making. These systems go beyond the challenges of inference scaling or learning to reason; they introduce system-level complexities, such as designing workflows and coordinating potentially conflicting actions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 434, + 541, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 541, + 567 + ], + "type": "text", + "content": "Single-Agent and multi-agent systems To distinguish agentic systems from standalone LLMs, we adopt the perspective of Kapoor et al. (2024), framing agentic behavior as a spectrum. We categorize these systems into two families: single-agent and multi-agent. In single-agent systems, a single LLM interacts with tools in its environment to refine reasoning, actions, and perceptions. 
These tools include external knowledge bases (Ke et al., 2024; Hammane et al., 2024; Sun et al., 2023), verifiers (Wan et al., 2024c; Guan et al., 2025), and practical applications like code interpreters, calendars, and maps (Yu et al., 2023b; Lu et al., 2024a). By leveraging these resources, the LLM iteratively enhances its decision-making and problem-solving capabilities. Recent milestones in single-agent systems, such as Grok 3 Deep Search (02/2025) and OpenAI Deep Research (02/2025), demonstrate how agents interact with the web to significantly improve reasoning, perform tasks like information retrieval, use code interpreters for calculations, and aggregate data from multiple sources." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 590, + 506, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 506, + 649 + ], + "type": "text", + "content": "\"Deep research independently discovers, reasons about, and consolidates insights from across the web. To accomplish this, it was trained on real-world tasks requiring browser and Python tool use ... While o1 demonstrates impressive capabilities in coding, math, and other technical domains, many real-world challenges demand extensive context and information gathering from diverse online sources.\" — OpenAI deep research release blog" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "content": "The second family, multi-agent systems, goes beyond agent-environment interactions by enabling agent-agent communication. Each agent takes on a distinct role and exchanges messages with others. 
Key challenges include designing effective communication protocols—whether collaborative (Chen et al., 2023c) or adversarial (Liang et al., 2023b)—and coordinating actions to reach consensus on the final action for the environment. A recent example of this potential is Manus, a popular product showcasing the power of multi-agent systems." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 82, + 189, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 82, + 189, + 94 + ], + "spans": [ + { + "bbox": [ + 69, + 82, + 189, + 94 + ], + "type": "text", + "content": "1.3 Unified Perspectives" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 103, + 541, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 103, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 67, + 103, + 541, + 236 + ], + "type": "text", + "content": "Although inference scaling and learning-to-reason take different approaches to improving reasoning, they are inherently connected. Inference scaling focuses on selecting the best reasoning trajectories, while learning-to-reason leverages both good and bad trajectories as training data. To unify these approaches, we categorize reasoning trajectory collection techniques in both regimes based on two key perspectives: input and output. At the input level, techniques modify or augment prompts to guide the LLM toward desirable reasoning paths. At the output level, the LLM generates multiple candidate responses, which are then evaluated, ranked, or refined. 
This framework highlights that many inference scaling techniques—such as prompt modification or trajectory search—can be repurposed for trajectory collection in learning-to-reason (as described in Section 3 and Section 5). Moreover, this connection shows that the two approaches are complementary: inference scaling methods can be applied to models trained under learning-to-reason, motivating the development of inference-aware learning-to-reason methods (Section 5.4)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 241, + 541, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 241, + 541, + 315 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 541, + 315 + ], + "type": "text", + "content": "These aspects are also effective across different architectures. Similar to standalone LLMs, we categorize techniques based on input and output perspectives. However, to align with agentic system conventions, we use perception as input (to an agent) and action as output (of an agent) in single-agent systems. For multi-agent systems, we consider communication as input (to a participating agent) and coordination as output (of the system). This analogy provides a unified perspective across regimes and architectures, offering a systematic and generalizable framework for analyzing LLM reasoning (see Figure 2)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 327, + 250, + 340 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 250, + 340 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 250, + 340 + ], + "type": "text", + "content": "1.4 Goal and Structure of the Survey" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 349, + 541, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 349, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 349, + 541, + 422 + ], + "type": "text", + "content": "The goal of this survey is to provide a comprehensive overview of key algorithmic details and major milestones in LLM reasoning research, particularly since the emergence of Chain-of-Thought (CoT), across both regime and architecture dimensions. We believe this is a timely and valuable contribution to the community, given the clear acceleration in research following CoT's introduction in 2022 (Figure 1). The rapid growth in studies exploring all aspects of LLM reasoning—from regimes and architectures to training algorithms—highlights the increasing importance and utility of reasoning capabilities in advancing the field." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 427, + 539, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 427, + 539, + 536 + ], + "spans": [ + { + "bbox": [ + 67, + 427, + 539, + 536 + ], + "type": "text", + "content": "Figure 2 provides an overview of the categorization in this survey, organized along two orthogonal dimensions. Within each architecture, there are two key perspectives to consider. The first perspective is input, or perception, or communication. This concerns how to construct a better prompt, refine the given observations from the environment, or establish protocols for exchanging messages with other agents. 
The second is output—encompassing action or coordination—which involves aggregating outputs, enhancing actions, or coordinating actions to produce a final result. While the figure illustrates high-level categorizations, the following sections delve into more specific terms. For example, 'input' is discussed in terms of constructing prompts (see e.g., Sections 3.1.1 and 5.1.1), while 'output' relates to optimizing output and collecting high-quality trajectories (e.g., Sections 3.1.2 and 5.1.2)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 540, + 541, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 540, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 540, + 541, + 613 + ], + "type": "text", + "content": "Figure 3 outlines the structure of this survey. We start with a brief introduction to the background, covering key terminologies, components, regimes, and architectures (Section 2). The subsequent sections explore inference scaling (Section 3), learning algorithms for reasoners and verifiers (Section 4), and learning to reason (Section 5). Within the discussions on inference scaling and learning to reason, we examine three key architectures: Standalone LLMs, Single-Agent systems, and Multi-Agent systems. Finally, Section 6 summarizes key insights and discusses open challenges and future directions." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 627, + 241, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 627, + 241, + 639 + ], + "spans": [ + { + "bbox": [ + 69, + 627, + 241, + 639 + ], + "type": "text", + "content": "1.5 Comparison to Related Surveys" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "type": "text", + "content": "Reasoning in LLMs has long been a fundamental challenge in the field. Earlier works, such as Huang & Chang (2023), provide a comprehensive overview of the evolution of informal deductive reasoning covering developments prior to the emergence of LLM agents and Reasoning Language Models (RLMs). Our work extends this discussion by focusing on LLM agents and RLMs. Qiao et al. (2023b) offer a detailed summary of advancements in LLM reasoning, with a particular emphasis on prompting techniques. In contrast, we offer a broader range of regimes (from inference to training) and architectures (from standalone LLM to multi-agent systems). 
Readers interested in a formal definition and taxonomy of natural language reasoning—grounded" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 79, + 553, + 589 + ], + "blocks": [ + { + "bbox": [ + 69, + 79, + 553, + 589 + ], + "lines": [ + { + "bbox": [ + 69, + 79, + 553, + 589 + ], + "spans": [ + { + "bbox": [ + 69, + 79, + 553, + 589 + ], + "type": "image", + "image_path": "67d40ba5d0383a1d1ca495c78f52298962e74f965a5a0cc2b362840f3cacbaab.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 595, + 541, + 633 + ], + "lines": [ + { + "bbox": [ + 68, + 595, + 541, + 633 + ], + "spans": [ + { + "bbox": [ + 68, + 595, + 541, + 633 + ], + "type": "text", + "content": "Figure 3: Taxonomy of LLM reasoning research organized in this survey by regimes (inference scaling, learning to reason) and architectures (standalone LLM, single-agent, multi-agent). Each leaf node includes examples from the literature that focus on the corresponding category." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 654, + 541, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 654, + 541, + 680 + ], + "spans": [ + { + "bbox": [ + 68, + 654, + 541, + 680 + ], + "type": "text", + "content": "in philosophical foundations—may refer to Yu et al. (2024a), which focuses specifically on this direction and is complementary to our scope." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 685, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 685, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 68, + 685, + 541, + 733 + ], + "type": "text", + "content": "Improvements in LLM reasoning are closely tied to advancements in a variety of techniques. Dong et al. (2024) present a comprehensive survey on in-context learning (ICL), while Zhou et al. (2024c) explore the interpretation and analysis of ICL from both theoretical and empirical perspectives. In contrast, our work organizes ICL techniques under different regimes—standalone LLMs, single-agent, and multi-agent" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "content": "systems—highlighting how these techniques evolve and interact within each setting. Recent studies suggest that enhancements in reasoning are often linked to inference scaling. Dong et al. (2024) provide an extensive review of inference-time self-improvement, and Welleck et al. (2024) offer a survey focused on three key themes: token-level generation algorithms, meta-generation algorithms, and efficient generation. Following the release of Reasoning Language Models (RLMs) such as OpenAI's o1 and DeepSeek's R1, there has been a significant increase in research dedicated to learning-to-reason approaches. Zeng et al. (2024) and Xu et al. 
(2025d) provide thorough surveys on these emerging developments. However, these surveys primarily focus on LLMs, and do not address agentic or multi-agent reasoning settings in depth." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 183, + 541, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 183, + 541, + 269 + ], + "spans": [ + { + "bbox": [ + 67, + 183, + 541, + 269 + ], + "type": "text", + "content": "Research on LLM reasoning has predominantly centered on logical and mathematical reasoning. Liu et al. (2025a) offer a comprehensive survey of logical reasoning in LLMs, delving into its theoretical foundations and associated benchmarks. In their position paper, Yang et al. (2024d) underscore the pivotal role of formal mathematical reasoning, showcasing its superiority over traditional NLP-based methods in generating verifiable proofs and automated feedback. Their work outlines progress in theorem proving and auto-formalization while identifying key challenges that remain. While we cover domain-specific reasoning in Section 6.1.3, we refer readers to Liu et al. (2025a) and Yang et al. (2024d) for a more in-depth treatment of these topics." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 273, + 541, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 273, + 541, + 345 + ], + "spans": [ + { + "bbox": [ + 67, + 273, + 541, + 345 + ], + "type": "text", + "content": "Reasoning is a critical capability in agentic systems (Pezeshkpour et al., 2024; Masterman et al., 2024). While numerous reviews focus on agent systems (Xi et al., 2023; Kapoor et al., 2024), discussions on reasoning within these systems remain limited. A concurrent work by Besta et al. (2025) introduces a comprehensive and modular framework for RLMs that systematically organizes key components such as reasoning structures, strategies, benchmarks and learning algorithms. 
However, their work does not delve into agentic and multiagent LLM systems.1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 351, + 539, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 351, + 539, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 351, + 539, + 483 + ], + "type": "text", + "content": "This survey provides a comprehensive overview of major milestones in LLM reasoning research, emphasizing two key dimensions: (1) the evolution of learning schemes—from inference scaling to learning-to-reason approaches—and (2) architectural advancements—from single LLMs to multi-agent systems. These dimensions summarize recent progress and lay the groundwork for future reasoning LLMs and agentic systems. We unify techniques under input and output perspectives, clarifying what must be customized or designed when building reasoning systems. Additionally, we detail essential techniques, including a comparison of the latest learning algorithms (e.g., RL) and an in-depth discussion of refiners and verifiers, which are critical for facilitating reasoning. Given these contributions, our survey is timely, offering AI researchers up-to-date insights into the field. We anticipate further research along these dimensions, such as agent-human regimes (Liang et al., 2024) and automated workflow design architectures (Hu et al., 2025; Zhang et al., 2024c; Zhou et al., 2025a)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 498, + 157, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 498, + 157, + 512 + ], + "spans": [ + { + "bbox": [ + 69, + 498, + 157, + 512 + ], + "type": "text", + "content": "2 Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 524, + 483, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 524, + 483, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 524, + 483, + 537 + ], + "type": "text", + "content": "In this section, we introduce foundational concepts that will be utilized throughout the paper." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 550, + 194, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 550, + 194, + 562 + ], + "spans": [ + { + "bbox": [ + 69, + 550, + 194, + 562 + ], + "type": "text", + "content": "2.1 Problem Formulation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 571, + 539, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 571, + 539, + 620 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 539, + 620 + ], + "type": "text", + "content": "LLM reasoning is often formulated within the Markov Decision Process (MDP) framework (Bellman, 1958), treating reasoning as a sequential decision-making process. While many of the terminologies in LLM reasoning originate from the AI agent and reinforcement learning (RL) literature (Russell & Norvig, 2010), their meaning in LLM reasoning can sometimes differ to suit the nature of LLM-based reasoning." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "spans": [ + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": "Reasoning step and thought The definition of what makes a reasoning step can vary depending on the specific inference or learning algorithm used, and it often depends on the granularity at which rewards (or feedback) are considered. Generally, a reasoning step can be expressed as a sequence of tokens " + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "inline_equation", + "content": "a_{t} = (x_{t_{1}},\\ldots ,x_{t_{K}})" + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "inline_equation", + "content": "x_{t_k}" + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": "-th token at inference step " + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": ". Typically, " + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 632, + 541, + 704 + ], + "type": "text", + "content": " represents a coherent step in reasoning (Lightman et al., 2024), such as a logical deduction or an intermediate conclusion. 
However, in extreme cases, a reasoning step can be the entire response (Zhang et al., 2024b; DeepSeek-AI et al., 2025)" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 712, + 541, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 712, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 68, + 712, + 541, + 733 + ], + "type": "text", + "content": "To avoid redundancy with existing literature, we do not include an analysis of reasoning benchmarks in this survey. For a detailed discussion of benchmarks, we direct readers to Xu et al. (2025d); Besta et al. (2025)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 177 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 177 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 177 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 177 + ], + "type": "table", + "html": "
SymbolName/terminologyExplanation
atAction/responseThe reasoning step or action taken at time step t , where t ∈ {1,2,...,T}
stState/contextst := (q, a1, ..., at-1), where q is the prompt/question.
RReward model/verifierEvaluates the reasoning quality of action at state st, providing feedback.
rtRewardrt := R(st, at), reward given by verifier at time step t.
τTrajectoryτ := ((s0, a0, r0), ..., (sT, aT, rT)), The entire reasoning process leading to an answer.
πPolicy model/reasonerat ~ π(at|st): The reasoning strategy that maps a reasoning state to the next reasoning step.
VValue ModelEstimates the expected future reasoning quality from state st.
FRefinera′t = F(st, at, rt): Modifies or refines the action based on feedback from the verifier.
", + "image_path": "ccf703a6c16491ff1de1e5cf6ff471fa0b06b107d09e10325da106c385ddafd4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 155, + 184, + 455, + 197 + ], + "lines": [ + { + "bbox": [ + 155, + 184, + 455, + 197 + ], + "spans": [ + { + "bbox": [ + 155, + 184, + 455, + 197 + ], + "type": "text", + "content": "Table 1: An overview of symbols and terminologies for convenience." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 217, + 541, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 217, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 67, + 217, + 541, + 255 + ], + "type": "text", + "content": "or a single token (Schulman et al., 2017; Ouyang et al., 2022).2 The term Thought generally refers to the sequence of reasoning steps (i.e., reasoning trajectory) that occur from the question (excluding the question itself) to the final answer (excluding the final answer)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": "Reasoning as MDP An MDP is a general framework for modeling environments where an agent makes sequential decisions by observing states and receiving rewards for its actions. The state-action-reward trajectories in an MDP can be formally expressed as: " + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "inline_equation", + "content": "\\tau = ((s_0, a_0, r_0), \\ldots, (s_T, a_T, r_T))" + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": " is the trajectory length. 
Naturally, LLM reasoning can be framed as an MDP, as each reasoning step builds upon previous ones to arrive at a final answer " + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "inline_equation", + "content": "(s_T)" + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": " from a question " + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "inline_equation", + "content": "(s_0)" + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": ". However, a key distinction lies in how the state transition function " + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "inline_equation", + "content": "P(s_{t+1} | s_t, a_t)" + }, + { + "bbox": [ + 67, + 266, + 541, + 373 + ], + "type": "text", + "content": " is defined. In traditional MDPs, state transitions are driven by the environment (unknown to the agent). In LLM reasoning, this depends on the system architecture: in standalone LLMs, the model itself generates the next state, whereas in agentic systems, state transitions can be influenced by external tools within the environment." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 380, + 541, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 541, + 392 + ], + "type": "text", + "content": "In RL-based approaches, the goal is to maximize the reasoning quality measured by the cumulative reward:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 245, + 407, + 541, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 407, + 541, + 440 + ], + "spans": [ + { + "bbox": [ + 245, + 407, + 541, + 440 + ], + "type": "interline_equation", + "content": "\\max \\mathbb {E} _ {\\tau \\sim P (\\tau | s _ {0}, \\pi)} \\left[ \\sum_ {t = 1} ^ {T} r _ {t} \\right], \\tag {1}", + "image_path": "42f2a813011d2945cd7d0f59d91cc7b3579bb147c2de1a3cb07f649bb422c1b6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "content": " is the reasoning policy and " + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "inline_equation", + "content": "r_t = \\mathcal{R}(s_t, a_t)" + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "content": " is the reward given by the reward function " + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "content": " at time step " + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + 
"content": ". There are two primary approaches to optimize Equation 1. The first is via training, which involves optimizing model parameters to learn the optimal policy " + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 67, + 445, + 541, + 529 + ], + "type": "text", + "content": " through methods like preference learning (e.g., DPO (Rafailov et al., 2023)) or reinforcement learning (e.g., PPO (Schulman et al., 2017)). The second is inference-scaling, which optimizes Equation 1 without altering model parameters. Instead, it employs a form of \"search\" with a frozen model, often guided by a reward model (Zhang et al., 2025b). We summarize key terminologies in Table 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 541, + 306, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 541, + 306, + 555 + ], + "spans": [ + { + "bbox": [ + 69, + 541, + 306, + 555 + ], + "type": "text", + "content": "2.2 Key Components of LLM Reasoning Systems" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 563, + 541, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 563, + 541, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 563, + 541, + 647 + ], + "type": "text", + "content": "An LLM-based reasoning system may contain three key components depending on the reasoning regime and system architecture: (a) A Reasoner that generates the reasoning steps, serving as the policy model; (b) Verifiers that evaluate the correctness of the final outcome and/or reasoning steps, serving as reward functions; and (c) A Refiner that improves reasoning trajectories by refining responses based on the feedback from the verifier. Figure 4 shows a depiction of these components. 
While these components play complementary and important roles in a reasoning system, they can be implemented by the same LLM, e.g., self-refinement (Saunders et al., 2022; Madaan et al., 2024) unifies them." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 659, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 659, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 659, + 541, + 696 + ], + "type": "text", + "content": "Reasoner The reasoner generates reasoning steps based on the current state of the reasoning process. It takes as input the previous states and outputs the next response or action. As the core component of a reasoning system, it determines how reasoning progresses and influences the final outcome." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 702, + 541, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 702, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 702, + 541, + 733 + ], + "type": "text", + "content": "2Although RLHF (Reinforcement Learning from Human Feedback) methods (Ouyang et al., 2022) receive rewards based on the final answer (outcome level), the underlying RL algorithms operate as multi-step RL at the token level. This differs from approaches like DeepSeek-R1 (DeepSeek-AI et al., 2025), which employs one-step RL for training." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 187, + 80, + 424, + 277 + ], + "blocks": [ + { + "bbox": [ + 187, + 80, + 424, + 277 + ], + "lines": [ + { + "bbox": [ + 187, + 80, + 424, + 277 + ], + "spans": [ + { + "bbox": [ + 187, + 80, + 424, + 277 + ], + "type": "image", + "image_path": "26c8347b750ab38973406a291c8177a4bbe8c7b3a3166b8f3370a1e920c5081b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 285, + 542, + 357 + ], + "lines": [ + { + "bbox": [ + 67, + 285, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 542, + 357 + ], + "type": "text", + "content": "Figure 4: Three key components of a reasoning system. The Reasoner proposes new responses (usually accompanied with rationales) for a query. The Verifier takes as input a verification instruction (e.g., what aspects to evaluate) and the response(s) from the reasoner, then outputs a judgment on the response(s) (often in the form of a numeric score or relative order, and typically accompanied by a natural language critique or rationale for its judgment). The Refiner, unlike the first two, takes as input an incorrect response and optionally the critique (as provided by the verifier) and outputs a revised response." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 377, + 541, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 377, + 541, + 451 + ], + "spans": [ + { + "bbox": [ + 67, + 377, + 541, + 451 + ], + "type": "text", + "content": "Verifier The verifier assesses the quality of the final answer or intermediate reasoning steps and provides feedback to the reasoner. Verifiers can be outcome-level, where only the outcome is evaluated, or process-level, where intermediate reasoning steps are also evaluated. The type of feedback can range from a scalar reward (e.g., correct/wrong answer on a math problem or pass/fail for a code test case) to natural language explanations. When ground-truth is available (e.g., during training), the verifier can be implemented using rule-based functions (e.g., string matching) or by training a reward model or using an LLM-judge model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 461, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 461, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 461, + 541, + 534 + ], + "type": "text", + "content": "Refiner Given feedback from the verifier, as well as a response from the reasoner, a refiner tries to improve and polish the original reasoning trajectory containing flaws. Refiners can play two important roles in reasoning. First, they can serve as a general approach to improve the performance during inference. More importantly, by providing explicit analysis, a refiner can also conduct implicit search, i.e., pointing out the obstacles in the current trajectory, and offer a new perspective to compress the search space. Yet, recent studies (Qu et al., 2024a) show that learning to refine is at least not easier than learning reasoning."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 546, + 195, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 546, + 195, + 559 + ], + "spans": [ + { + "bbox": [ + 69, + 546, + 195, + 559 + ], + "type": "text", + "content": "2.3 System Architectures" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 568, + 541, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 541, + 629 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 541, + 629 + ], + "type": "text", + "content": "Building on the three key components introduced above, in this section, we describe how these elements are organized within different system architectures to achieve effective reasoning. While the three components serve as the foundation, their integration and interaction vary across architectural paradigms. In this survey, we structure reasoning systems into three main types: standalone LLM, single-agent system, and multi-agent system. Figure 5 shows their comparison with visualizations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 640, + 223, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 640, + 223, + 653 + ], + "spans": [ + { + "bbox": [ + 69, + 640, + 223, + 653 + ], + "type": "text", + "content": "2.3.1 Standalone LLM Systems" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "type": "text", + "content": "A standalone LLM system comprises a single LLM which can play the role of one or more components (we refer this as unified components) in the reasoning system. It processes an input prompt and generates final outputs, which often include rationales or reasoning steps. 
As an LLM, it has the capability to produce diverse rationales through sampling—a key property utilized by many advanced reasoning techniques. Importantly, a standalone LLM operates independently, without interacting with external environments or collaborating with other LLMs. Its decision-making is based solely on simple input-output mappings or through iterative" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 79, + 541, + 350 + ], + "blocks": [ + { + "bbox": [ + 70, + 79, + 541, + 350 + ], + "lines": [ + { + "bbox": [ + 70, + 79, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 70, + 79, + 541, + 350 + ], + "type": "image", + "image_path": "ff18fb1a724ca38991f358e0f8a0b23cfed9336202da57dbd8f7c7110d2f0334.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 356, + 541, + 381 + ], + "lines": [ + { + "bbox": [ + 68, + 356, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 68, + 356, + 541, + 381 + ], + "type": "text", + "content": "Figure 5: Three architecture types used for designing a reasoning system in the context of LLMs. The figure highlights perspectives that the literature emphasizes for customization."
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 405, + 541, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 405, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 405, + 541, + 441 + ], + "type": "text", + "content": "sampling from the same model, where the prompt incorporates prior reasoning steps (a method known as self-contained reasoning). This self-contained nature allows the LLM to function autonomously while maintaining coherence in its reasoning processes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 455, + 305, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 305, + 468 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 305, + 468 + ], + "type": "text", + "content": "2.3.2 From Standalone LLM to Language Agents" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 476, + 541, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 476, + 541, + 646 + ], + "spans": [ + { + "bbox": [ + 67, + 476, + 541, + 646 + ], + "type": "text", + "content": "While the concept of an agent has been a long-standing idea in AI (Russell & Norvig, 2010), the notion of language agents has gained prominence alongside recent advancements in LLMs. The key distinction between an agent and a standalone LLM lies in two advanced capabilities: interactivity (Weng, 2023; Yao & Narasimhan, 2023) and autonomy (Xi et al., 2023; Wang et al., 2024d). Interactivity refers to an agent's ability to engage with the external world, including environments or other agents. This capability is crucial because LLMs, while powerful, often have limited knowledge and reasoning abilities confined to their internal memory. 
By enabling interaction with the external world, an LLM can augment its internal knowledge with external information, significantly expanding its understanding and grounding its outputs in real-world observations. Autonomy, on the other hand, refers to an agent's ability not only to follow human instructions but also to independently initiate and execute actions. This capability often involves planning but can extend to more complex behaviors. For instance, a fully autonomous agent should be capable of detecting novel situations, proactively taking initiative, and determining effective interaction strategies without explicit human guidance. These advanced capabilities distinguish LLM-based agents from standalone LLMs, enabling them to operate more dynamically and adaptively in real-world scenarios." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 650, + 541, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 650, + 541, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 650, + 541, + 711 + ], + "type": "text", + "content": "To delineate the boundary between the agent and its environment, we employ the concept of controllability (Sumers et al., 2024). Specifically, the environment is defined as an external module that the agent cannot modify. For example, a knowledge base containing resources like Wikipedia or a compiler is considered part of the environment because the agent cannot alter it. Similarly, another LLM acting as a judge or verifier is also treated as part of the environment, as its outputs operate independently of the agent. 
In contrast," + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 721, + 470, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 721, + 470, + 732 + ], + "spans": [ + { + "bbox": [ + 80, + 721, + 470, + 732 + ], + "type": "text", + "content": "3In this survey, the terms agent and LLM-based agent are used interchangeably unless stated otherwise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 106 + ], + "type": "text", + "content": "components like working memory or prompts that the agent can directly modify are not classified as part of the environment." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 112, + 541, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 112, + 541, + 173 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 541, + 173 + ], + "type": "text", + "content": "In this work, we adopt the perspective of Kapoor et al. (2024), which conceptualizes agentiness as a spectrum. The more interactiveness and autonomy an LLM exhibits, the more agentic it is considered to be. In the upper right of Figure 5, we illustrate this spectrum visually. Within this spectrum, we define a system with agent-environment interaction as a single-agent system and a system that additionally incorporates agent-agent communication as a multi-agent system." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 185, + 204, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 185, + 204, + 198 + ], + "spans": [ + { + "bbox": [ + 69, + 185, + 204, + 198 + ], + "type": "text", + "content": "2.3.3 Single-agent Systems" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 205, + 541, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 205, + 541, + 278 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 541, + 278 + ], + "type": "text", + "content": "Given the definitions above, the interaction between the agent and its environment is a central aspect of single-agent systems. These interactions can vary widely in complexity and design. In Figure 5, we illustrate a single-agent system in the bottom left. The focus here is on designing the agent's actions—such as tool use, retrieval, or answer refinement—and obtaining useful perceptions from the environment, which may include feedback from an external verifier or compiler, or data from a knowledge base (KB). This architecture enhances the LLM's capabilities by enabling it to dynamically engage with and adapt to external contexts." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 283, + 541, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 283, + 541, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 283, + 541, + 333 + ], + "type": "text", + "content": "While a fully autonomous agent should ideally learn to interact with the environment automatically, the literature identifies several predefined interaction patterns (also referred to as workflows (Schluntz & Zhang, 2024)) that have proven effective. We elaborate on these patterns below and, in Sections 3.2 and 5.2, explore specific techniques that leverage them to improve agent performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 338, + 539, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 338, + 539, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 338, + 539, + 469 + ], + "type": "text", + "content": "- Generator-evaluator pattern. This pattern divides the reasoning capability into two distinct components: a generator and an evaluator (e.g., a verifier or other evaluators like compilers). It represents a natural extension of RL-style optimization and has gained popularity since the introduction of RLHF (Ouyang et al., 2022). In this setup, the evaluator functions as the environment, providing feedback on the quality of the agent's actions. Such feedback is particularly valuable for guiding the search for effective actions and improving decision-making. Recent studies have demonstrated that verifiers can significantly enhance the performance and generalization capabilities of agents (Zhang et al., 2024i; Sun et al., 2024c). However, this pattern is not without its challenges. It can suffer from unreliable components and error propagation. For instance, Kim et al. (2024d) points out that verifiers are vulnerable to reward hacking, where the reasoner exploits loopholes in the verifier to achieve higher reward scores, ultimately degrading the overall performance of the agentic system." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 475, + 539, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 475, + 539, + 584 + ], + "spans": [ + { + "bbox": [ + 67, + 475, + 539, + 584 + ], + "type": "text", + "content": "- Generator-critic-refiner pattern This pattern divides reasoning capabilities into three components: a reasoner, a critic, and a refiner. The critic acts as the environment, providing feedback—typically in the form of guidance on how to correct errors in the generated actions. 
The refiner then takes the flawed actions and the critic's feedback as input, producing revised and improved actions. This pattern enables the agentic system to benefit from iterative feedback, making it particularly effective for complex tasks where the initial outputs of the reasoner are suboptimal. However, it may also lead to a phenomenon known as 'over-refinement' (Chen et al., 2024b), where the agent iterates excessively, leading to diminishing returns or even degraded performance rather than improvement. Careful design and balancing of the refinement process are essential to mitigate this risk and ensure the pattern's effectiveness." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 596, + 201, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 596, + 201, + 608 + ], + "spans": [ + { + "bbox": [ + 69, + 596, + 201, + 608 + ], + "type": "text", + "content": "2.3.4 Multi-agent Systems" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "text", + "content": "In addition to the agent-environment loop in single-agent systems, multi-agent systems introduce an additional agent-agent loop, where multiple agents interact and influence one another. In this framework, agents assume different roles, exchange messages, and collaboratively coordinate their actions while operating within a shared environment.4 Figure 5 shows an example multi-agent system. 
It involves " + }, + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "text", + "content": " agents (often playing distinct roles) and " + }, + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 616, + 539, + 713 + ], + "type": "text", + "content": " rounds of communication through message exchanges. The focus is on designing effective communication protocols (e.g., debates) and coordinating the agents' actions to determine a final decision or action within the environment (e.g., employing an additional judge to adjudicate final actions). The following communication patterns have emerged as effective predefined strategies:" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 721, + 485, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 721, + 485, + 732 + ], + "spans": [ + { + "bbox": [ + 80, + 721, + 485, + 732 + ], + "type": "text", + "content": "4We use message to denote agent-agent communication and action to denote agent-environment interaction." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 164, + 79, + 435, + 232 + ], + "blocks": [ + { + "bbox": [ + 164, + 79, + 435, + 232 + ], + "lines": [ + { + "bbox": [ + 164, + 79, + 435, + 232 + ], + "spans": [ + { + "bbox": [ + 164, + 79, + 435, + 232 + ], + "type": "image", + "image_path": "9d408ce1bd52f2ac1a399692e04982fa9e638b7c08e8b740c6577799ac51cbb3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 239, + 541, + 336 + ], + "lines": [ + { + "bbox": [ + 67, + 239, + 541, + 336 + ], + "spans": [ + { + "bbox": [ + 67, + 239, + 541, + 336 + ], + "type": "text", + "content": "Figure 6: Inference-time and training-time regimes of a reasoning system. We use tree search as an example to illustrate the inference scaling and trajectories collection. Given a query, inference scaling relies on extensive inference computation to improve the reasoner's distribution. Specifically, it generates multiple candidate reasoning steps at each layer and selects the best solution to proceed (e.g., by using an external verifier or assembling). In contrast, learning to reason focuses on collecting trajectories and training from the collected data with minimal inference-time computation. It takes all trajectories in the process (identical to those used in inference-scaling, allowing us to reuse the same tree) and labels them with preferences. The preference data can then be used to train the reasoner." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 371, + 541, + 509 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 67, + 371, + 541, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 371, + 541, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 371, + 541, + 442 + ], + "type": "text", + "content": "- Debate pattern. In this pattern, two or more agents engage in a debate with each other. The term debate can vary in implementation. For example, in (Wang et al., 2024h), it involves agents addressing the problem independently and incorporating other agents' responses as additional advice. In (Liang et al., 2023b), it means agents approach the problem from opposing perspectives. After the debate, a consensus is reached through mechanisms such as an additional judge, weighted voting, or a fixed number of iterations, ultimately determining the collective action to be taken in the environment." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 449, + 541, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 541, + 509 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 541, + 509 + ], + "type": "text", + "content": "- Reconcile pattern. This pattern facilitates collaborative round-table discussions among agents, enabling them to reach a consensus through mechanisms such as voting or confidence levels. For instance, ReConcile (Chen et al., 2023c) introduce a round-table discussion framework where agents make decisions using a weighted voting system. In this process, each agent assigns a confidence level to its proposed answers, and these confidence levels are used as weights to cast votes, ultimately determining the final decision." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 537, + 186, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 186, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 186, + 550 + ], + "type": "text", + "content": "2.4 Reasoning Regimes" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 565, + 541, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 565, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 565, + 541, + 732 + ], + "type": "text", + "content": "Orthogonal to the components and architectures discussed above, reasoning systems can operate under distinct computational regimes. Systems employing inference-time computation can refine their outputs through iterative reflection and revision or search for improved solutions by repeatedly sampling the underlying model. However, such systems must balance cost (e.g., computational resources, latency) and effectiveness (e.g., accuracy, reliability) in achieving correct solutions. The learning-to-reason paradigm addresses this tradeoff by shifting computational burdens from inference to training, learning policies from simulated reasoning processes. While both regimes enhance effectiveness by redistributing computational effort across training and inference, they lack the capacity to dynamically adapt resource allocation or method selection to individual problems—a limitation highlighted in recent work (Sprague et al., 2024a; Kapoor et al., 2024; Chen et al., 2024d). To bridge this gap, emerging approaches within the learning-to-reason framework focus on optimizing the reasoning process itself, jointly minimizing cost and maximizing effectiveness. This involves dynamically allocating computational resources, searching for contextually optimal methods, and training models to synergize with adaptive inference-time strategies. 
Figure 6 contrasts these regimes, and we elaborate on each in the sections below." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 135 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 135 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 135 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 135 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsInstruction engineeringModify instruction by human-design templateParanjape et al. (2021); Zhou et al. (2023b)
Demonstration engineeringDrawing analogy from relevant experienceWei et al. (2022b); Luo et al. (2024d)
Prompt optimizationSearch for optimized prompt (e.g., bootstrap)Xu et al. (2022); Pryzant et al. (2023)
Optimizing OutputGenerating subtasksDecompose the original task into manageable subtasksDua et al. (2022); Zhou et al. (2023a)
Exploration and searchBranch and explore multiple paths to optimize reasoning trajectoriesYao et al. (2023a); Besta et al. (2024)
", + "image_path": "3df501760c731b5b3866e2f732d8ca9368b17b759d192490f7d38cbc2fd05fd1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 169, + 143, + 440, + 156 + ], + "lines": [ + { + "bbox": [ + 169, + 143, + 440, + 156 + ], + "spans": [ + { + "bbox": [ + 169, + 143, + 440, + 156 + ], + "type": "text", + "content": "Table 2: Summary of inference scaling with standalone LLM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 179, + 185, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 179, + 185, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 179, + 185, + 192 + ], + "type": "text", + "content": "2.4.1 Inference Scaling" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 201, + 541, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 541, + 357 + ], + "spans": [ + { + "bbox": [ + 67, + 201, + 541, + 357 + ], + "type": "text", + "content": "Inference scaling techniques enhance reasoning capabilities during test time by increasing the amount of computation performed before generating an answer. These methods can be broadly categorized into three key strategies: (a) Prompt engineering and optimization, which focuses on constructing effective reasoning-provoking prompts through template-based methods, human curation, and automated optimization. (b) Search and planning methods, which include task decomposition, plan generation and verification, and exploration-based approaches. They enable structured multi-step reasoning, often involving backtracking within trees or graphs, to systematically explore potential solutions and verify their validity. (c) System-level enhancements, which incorporates external tools, knowledge sources, and verification mechanisms to augment the model's reasoning capabilities. 
For standalone LLMs, inference scaling primarily revolves around prompt construction and search strategies. In multi-agent settings, it further extends to include agent-agent communication and coordinated action strategies, enabling collaborative problem-solving. While these techniques have demonstrated significant effectiveness in improving reasoning performance without requiring updates to model parameters, they often come with increased computational costs during inference." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 372, + 194, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 194, + 384 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 194, + 384 + ], + "type": "text", + "content": "2.4.2 Learning to Reason" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 393, + 541, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 393, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 67, + 393, + 541, + 466 + ], + "type": "text", + "content": "This regime shifts the focus to training models to reason effectively before deployment, often referred to as training-time methods. The core idea is to simulate inference, generating trajectories that capture potential reasoning paths. These trajectories are then used to train the reasoner with online or offline learning methods. The methods include supervised and/or reinforcement learning. While learning-to-reason typically minimizes computational costs during inference, it incurs higher costs during simulation and training. In Section 5, we provide a detailed discussion of methods within this regime across different architectures." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 471, + 541, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 541, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 541, + 521 + ], + "type": "text", + "content": "Recently, this paradigm has evolved to incorporate knowledge of both training and testing methods, enabling adaptive strategies. For instance, it now allows for the training of reasoners optimized for known inference techniques (Balashankar et al., 2024), or dynamically distributes computational costs between training and testing, offering a more flexible and efficient framework (Damani et al., 2025; Yue et al., 2025)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 537, + 332, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 332, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 332, + 552 + ], + "type": "text", + "content": "3 Improving Reasoning with Inference Scaling" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 564, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 564, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 564, + 541, + 733 + ], + "type": "text", + "content": "Compared to small-scale models, pretrained large-scale language models (LLMs) have demonstrated emergent capabilities (Wei et al., 2022a), such as in-context learning (Dong et al., 2024) and role-playing (Shanahan et al., 2023a), which manifest without additional fine-tuning (i.e., without any gradient updates). Arguably, many of these abilities become apparent only after reaching a certain scale in model size. While scaling model parameters has been shown to improve reasoning performance across various tasks, the returns have diminished due to the high cost of training increasingly larger models. 
As a result, inference scaling has emerged as an appealing and orthogonal paradigm to unlock reasoning abilities in LLMs by providing additional test-time compute, allowing them to \"think\" before producing a final answer. It has been demonstrated that optimal scaling of test-time compute can be more effective than scaling model parameters (Snell et al., 2024), as it offers better generalization through enhanced flexibility in prompt and workflow design. Such deliberate thinking can be enabled either through training (DeepSeek-AI et al., 2025) or by explicit programming at inference time (OpenAI et al., 2024). In this section, we focus on the latter and defer training-time methods to Section 5. We begin with inference scaling methods for standalone LLMs and subsequently extend the discussion to single and multi-agent compound systems." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 82, + 281, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 82, + 281, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 82, + 281, + 95 + ], + "type": "text", + "content": "3.1 Inference Scaling With Standalone LLM" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 105, + 541, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 105, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 68, + 105, + 541, + 143 + ], + "type": "text", + "content": "In this section, we examine the core components and techniques that have made inference-time reasoning methods effective. 
Many of these methods draw inspiration from research on human cognitive processes on planning, problem solving, and decision-making (Newell et al., 1959; 1972; Stanovich & West, 2000)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 157, + 307, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 157, + 307, + 170 + ], + "spans": [ + { + "bbox": [ + 69, + 157, + 307, + 170 + ], + "type": "text", + "content": "3.1.1 Constructing Reasoning Provoking Prompts" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 179, + 541, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 179, + 541, + 240 + ], + "spans": [ + { + "bbox": [ + 68, + 179, + 541, + 240 + ], + "type": "text", + "content": "Although large-scale pre-training endows LLMs with patterns that support reasoning, these capabilities often remain latent under generic prompts. Liu et al. (2025c) demonstrate that deep-reasoning behaviors—such as reflection and self-verification, which signal profound analytical thought—can be amplified simply by increasing the sampling budget. This highlights the importance of designing prompts that deliberately provoke reasoning, thereby surfacing and leveraging the latent human priors within LLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 255, + 541, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 255, + 541, + 435 + ], + "spans": [ + { + "bbox": [ + 68, + 255, + 541, + 435 + ], + "type": "text", + "content": "Instruction engineering Enabling LLMs to reason effectively depends heavily on the quality of the instructions provided (Sclar et al., 2024; Zhuo et al., 2024; Long et al., 2024a). Recognizing this, numerous prompt engineering studies aim to improve LLM reasoning by enhancing instructions. 
Extensive efforts in this direction primarily focus on template-based and human-curated instructions (Paranjape et al., 2021; Sanh et al., 2022; Mishra et al., 2022; Si et al., 2023; Long et al., 2024b). With LLMs becoming increasingly adept at following human instructions and generating human-like text, focus has shifted toward leveraging the models themselves to craft and refine high-quality instructions. A notable example of this shift is the Automatic Prompt Engineer (APE) introduced by Zhou et al. (2023b), which uses LLMs to generate high-quality instructions, achieving performance comparable to or surpassing that of human annotators on 31 reasoning tasks. Furthermore, other studies have proposed methods to modify instructions for improved reasoning. For instance, Deng et al. (2023a) and Mekala et al. (2024) present Rephrase-and-Response and EchoPrompt, respectively, two simple yet effective strategies where LLMs are instructed to rephrase queries before answering, significantly enhancing LLM performance on reasoning tasks. Similarly, Tian et al. (2023) introduce R3 prompting, which instructs LLMs to first extract key sentences from noisy contexts, then rephrase the instruction to explicitly include extracted sentences." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 451, + 541, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 451, + 541, + 608 + ], + "spans": [ + { + "bbox": [ + 68, + 451, + 541, + 608 + ], + "type": "text", + "content": "Demonstration engineering Humans can address new problems by drawing analogy from relevant past experience (Holyoak, 2012). Inspired by this, Yasunaga et al. (2024) propose analogical prompting to guide LLMs to self-generate exemplars or knowledge relevant to the given problem as few-shot demonstrations for reasoning, outperforming hand-crafted or retrieved examples. 
For example, LLMs are prompted to generate a problem on calculating a third-order determinant before solving the given fourth-order determinant. Similarly, Chen et al. (2023d); Yang et al. (2023a); Luo et al. (2024a) highlight the effectiveness of self-generated relevant exemplars. Qin et al. (2025) further systematically assess the capability of LLMs to perform analogical reasoning and find that performance is not primarily determined by whether the exemplars are topically relevant to the task. Instead, they show that even exemplars from unrelated domains, such as self-generated biological exemplars, can lead to improved performance, as long as they are accurate and structurally aligned with the reasoning steps required by the target task. This highlights that the quality of the exemplar (its correctness, clarity, and structural usefulness for reasoning) can be the key limiting factor, rather than the relevancy regarding to the topic domain." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "text", + "content": "Conventionally, a fixed set of few-shot demonstrations is applied to all queries, which can be suboptimal, especially when queries vary significantly. An alternative approach is to retrieve demonstrations tailored to the current query. Research has shown that retrieval-based demonstration selection significantly improves task performance. The main goals for selecting demonstrations are similarity (Rubin et al., 2022; Agrawal et al., 2023; Li et al., 2023e; Ye et al., 2023a) and diversity (Levy et al., 2023; He et al., 2023; Kim et al., 2024a). 
Various retrieval strategies have been proposed for selecting " + }, + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "text", + "content": " demonstrations, including top- " + }, + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 613, + 541, + 733 + ], + "type": "text", + "content": " similarity-based retrieval (Liu et al., 2022; Li et al., 2023e), clustering-based retrieval (Luo et al., 2023c; Wang et al., 2024i), and iterative retrieval (Khattab et al., 2022; Levy et al., 2023; Wang et al., 2024e). These methods enable adaptive and effective demonstration selection, enhancing the model's reasoning and generalization across diverse queries." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 215 + ], + "type": "text", + "content": "In addition, many-shot in-context learning has emerged as a complementary line of work, where hundreds or even thousands of demonstrations are provided to significantly enhance the performance of LLMs, especially on complex reasoning tasks (Li et al., 2023c; Agarwal et al., 2024; Zou et al., 2024; Gu et al., 2025). 
Many-shot prompting can be seen as an extreme form of demonstration engineering, where the focus is on scaling the quantity of demonstrations to maximize the model's capacity to learn from in-context examples. However, the effectiveness of many-shot ICL is often limited by the high cost of obtaining a large number of labeled demonstrations. To mitigate this gap, Chen et al. (2025) recently introduce MAPLE, a novel influence-based many-shot ICL framework that identifies impactful unlabeled samples, pseudo-labels them by querying LLMs, and adaptively selects them for each test query. This approach effectively enhances many-shot ICL performance with minimal labeling cost, demonstrating improved adaptability and reasoning capabilities of LLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 228, + 541, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 541, + 422 + ], + "type": "text", + "content": "Prompt optimization Prompt optimization methods, aiming to systematically and strategically optimize prompts for improved performance, have been extensively explored for enhancing LLM reasoning. For instance, Xu et al. (2022) introduce Genetic Prompt Search (GPS), leveraging genetic algorithms to search for the best instruction. Similarly, Guo et al. (2024a) and Fernando et al. (2024) employ evolutionary algorithms to iteratively refine instructions, while Long et al. (2024c) introduce a minimax-game framework, inspired by Generative Adversarial Networks (Goodfellow et al., 2014) to simultaneously optimize instructions and demonstrations. Furthermore, Pryzant et al. (2023) present the concept of \"text gradients\" which leverage feedback from prompt executions and LLMs to update prompts, akin to Optimization by PROempting (OPRO) (Yang et al., 2024c), which uses execution feedback. 
Despite these advances, the interplay between various prompt optimization algorithms remains underexplored. Recently, Wan et al. (2024a) conducted a comprehensive evaluation of representative techniques for instruction and demonstration optimization, examining their effectiveness in isolation and combination across a range of challenging tasks. Their findings indicate that intelligently reusing samples from prompt evaluations as demonstrations consistently enhances performance, that demonstration selection strategies can have a greater impact than instruction optimization techniques, and that a synergistic combination of demonstration and instruction optimization can outperform their individual contributions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 436, + 362, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 436, + 362, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 436, + 362, + 449 + ], + "type": "text", + "content": "3.1.2 Optimizing Reasoning Output with Search and Planning" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 457, + 541, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 457, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 457, + 541, + 567 + ], + "type": "text", + "content": "Generating reasoning subtasks Human problem-solving often involves planning manageable steps that lead to a successful resolution (Dostál, 2015). Likewise, improving LLM reasoning by breaking down complex problems into intermediate steps has become a successful paradigm. In this context, subtasks refer to the decomposed parts of a problem, structures are the frameworks guiding the reasoning process, and intermediate steps are intermediate results produced at each stage of problem-solving. Nye et al. (2021) and Wei et al. 
(2022b) pioneer this direction by proposing Chain-of-Thought (CoT) prompting which uses a few demonstrations with human-written intermediate steps to guide the model in solving complex problems in a similar style. Kojima et al. (2022) further simplified this approach by introducing zero-shot CoT prompting, which eliminates the need for demonstrations by instructing models to \"think step by step\" before answering." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 571, + 539, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 571, + 539, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 539, + 656 + ], + "type": "text", + "content": "Simple CoT prompting often struggles as task complexity increases, particularly when the task surpasses the complexity of the provided demonstrations. To address this, researchers have proposed methods that explicitly guide models in decomposing tasks into subtasks, thereby enhancing intermediate step reasoning. Dua et al. (2022) propose an iterative approach, where tasks are progressively broken down into simpler subtasks and solved step-by-step. Similarly, Zhou et al. (2023a); Khot et al. (2023) and Suzgun & Kalai (2024a) advocate for a \"divide-and-conquer\" strategy, where tasks are first divided into subtasks and then solved sequentially." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 660, + 541, + 733 + ], + "type": "text", + "content": "Beyond subtasks, researchers emphasize the importance of robust reasoning structures such as hierarchical and decision-making processes that capture the underlying mechanisms involved in problem-solving. Zhou et al. (2024b) introduce Self-Disccover, a framework that enables models to self-identify reasoning structures for any task using a seed set of general reasoning skill modules. 
Building on this, Aswani et al. (2024) propose Auto-Evolve, which dynamically adapts reasoning modules to accommodate more diverse problems. In addition to designing better reasoning steps, several studies address the need to correct intermediate" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 114 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 114 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 114 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 114 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
Feedback RefinementVerifier and ReflectionUse verifiers to select, modify, or refine actionsSnell et al. (2025); Madaan et al. (2023b)
Action EnhancementRetrieval and ToolAccess external knowledge and specialized resourcesLi et al. (2024e); Ma et al. (2024a)
", + "image_path": "b56b161b40385ab764234fe1b8b74a6302e624a8559602cb7e9c4810b1a0ed83.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 164, + 123, + 444, + 135 + ], + "lines": [ + { + "bbox": [ + 164, + 123, + 444, + 135 + ], + "spans": [ + { + "bbox": [ + 164, + 123, + 444, + 135 + ], + "type": "text", + "content": "Table 3: Summary of inference scaling with single-agent system" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "text", + "content": "steps. For example, Deng et al. (2024a); Yan et al. (2024) and Wu et al. (2024b) propose methods to refine intermediate outputs. Notably, Zhang et al. (2024i) observe that smaller models (" + }, + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "inline_equation", + "content": "\\leq 13\\mathrm{B}" + }, + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "text", + "content": " parameters) in particular need stronger models acting as verifiers to validate and correct intermediate steps." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 205, + 541, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 205, + 541, + 397 + ], + "spans": [ + { + "bbox": [ + 70, + 205, + 541, + 397 + ], + "type": "text", + "content": "Exploration and search Research on human problem-solving reveals that complex reasoning tasks often admit multiple valid paths to reach a correct solution (Stanovich & West, 2000). Compared to linear reasoning structures like chain-of-thought, approaches that incorporate exploration during problem-solving have shown significant improvements for complex reasoning tasks. 
Unlike task decomposition methods (Dua et al., 2022; Zhou et al., 2023a; Khot et al., 2023), exploration-based approaches employ dynamic search through multiple possible reasoning paths simultaneously rather than following certain decomposition patterns, enabling models to explore ambiguous solution strategies for complex problems. Exploration typically involves two key components: branching and aggregation. Due to the stochastic nature of language model decoding, branching is often implemented through independent re-sampling with non-zero temperature, generating diverse reasoning chains. Early methods, such as self-consistency (Wang et al., 2023f), introduced branching only at the beginning of the reasoning chain, conditioned on the initial query. While simple, this approach lacks local exploration of intermediate reasoning steps, has limited applicability for tasks with multiple valid answers, and produces reasoning chains with restricted diversity (Chen et al., 2024d). More recent advancements, such as Tree-of-Thoughts (Yao et al., 2023a), Graph-of-Thoughts (Besta et al., 2024), and Forest-of-Thoughts (Bi et al., 2024), enable finer-grained branching by considering both the query and a history of previous thoughts or thought-state sequences, allowing for more nuanced and flexible exploration." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 403, + 541, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 403, + 541, + 487 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 541, + 487 + ], + "type": "text", + "content": "The effectiveness of branched reasoning paths with thoughts or answers depends on aggregation or evaluation strategies. Recent progress is centered around two categories: ensemble-based methods and verifier-based methods. Ensemble-based methods have been widely employed due to their simplicity and self-contained nature, requiring no external knowledge or sources for validation. 
These approaches typically employ strategies such as majority voting across answer tokens (Wang et al., 2023f; 2024a; Li et al., 2024b) or confidence-based selection (Wang & Zhou, 2024). Verifier-based methods, in contrast, employ external verifiers or judges to score and select preferred answers among candidate solutions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 501, + 298, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 501, + 298, + 514 + ], + "spans": [ + { + "bbox": [ + 69, + 501, + 298, + 514 + ], + "type": "text", + "content": "3.2 Inference Scaling With Single-agent System" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 522, + 541, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 522, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 67, + 522, + 541, + 643 + ], + "type": "text", + "content": "LLMs are trained on static, finite datasets, which inherently limits their parametric knowledge. This limitation hinders their ability to reason effectively in scenarios requiring up-to-date or highly specialized knowledge. The use of an agentic system, where LLMs are augmented with external verifiers, retrieval and tool integration, has proven effective in such scenarios. Verifiers provide reasoners with a signal of the quality of their outputs (e.g., a score or natural language feedback), which may be used by reasoners to modify or improve their outputs. Retrieval augmentation improves reasoning by enabling the agent to access relevant external knowledge, thereby reducing hallucinations and ensuring more accurate, fact-based responses. Additionally, the agent can achieve higher performance by leveraging specialized external tools to handle specific intermediate reasoning steps. For instance, allowing an agent to use a calculator can minimize errors stemming from inaccuracies in numerical generation." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 648, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 648, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 648, + 541, + 733 + ], + "type": "text", + "content": "A pioneering approach in this domain is the ReAct framework (Yao et al., 2023b), which interleaves reasoning and acting by prompting LLMs to generate both reasoning traces and task-specific actions in an interleaved manner. This synergy allows the model to induce, track, and update action plans while interfacing with external sources (environment) to gather additional information. ReAct has demonstrated effectiveness across QA and interactive decision-making tasks. Building upon ReAct, LATS (Zhou et al., 2024a) unifies reasoning, acting, and planning within LLMs. By combining Monte Carlo Tree Search with ReAct, LATS enables structured search over a combinatorial space of reasoning and acting paths. More recently, Liu et al." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 82, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 82, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 69, + 82, + 541, + 106 + ], + "type": "text", + "content": "(2024f) formalize reasoning and acting with LLMs under a Bayesian adaptive MDP and propose RAFA, a theoretically grounded framework for orchestrating the reasoning and acting of LLMs." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 118, + 295, + 130 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 118, + 295, + 130 + ], + "spans": [ + { + "bbox": [ + 69, + 118, + 295, + 130 + ], + "type": "text", + "content": "3.2.1 Refinement with Verifiers and Reflections" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 138, + 541, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 138, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 68, + 138, + 541, + 236 + ], + "type": "text", + "content": "A natural basis for modifying agent actions is the quality of their generated outputs—if the output is incorrect, the agent should attempt to correct it. However, ground-truth references are typically unavailable to the agent at test time. In such scenarios, agents often rely on verifiers, which are models or systems that provide an approximate measure of correctness, to guide action modifications. A special case arises when the verifier has access to ground-truth outcomes. Oracle verifiers (First et al., 2023; Xin et al., 2024a), which leverage correct answers, have shown significant performance improvements over baselines without verifiers (Huang et al., 2024a; Brown et al., 2024). However, their applicability is limited to scenarios where ground-truth data is readily available or easily accessible, such as in games or structured environments." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 239, + 541, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 239, + 541, + 324 + ], + "spans": [ + { + "bbox": [ + 68, + 239, + 541, + 324 + ], + "type": "text", + "content": "In contrast, non-oracle (or imperfect) verifiers provide a more widely applicable solution. Their form varies depending on the task and knowledge source. For instance, Cobbe et al. (2021); Feng et al. (2023b); Snell et al. 
(2025) employ trained outcome reward models (ORMs) as verifiers to rerank responses. For more granular evaluation, Lightman et al. (2024) and Zhang et al. (2025b) train process reward models (PRMs) to serve as inference-time verifiers. By enabling the reward model to assess each reasoning step individually, PRMs generally yield greater improvements during inference compared to ORMs (Uesato et al., 2022; Tian et al., 2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 329, + 541, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 329, + 541, + 473 + ], + "spans": [ + { + "bbox": [ + 68, + 329, + 541, + 473 + ], + "type": "text", + "content": "While reward models provide actionable signals about the quality of model responses, they are non-generative verifiers. As a result, they are unsuitable for verification approaches that require natural language feedback. For instance, synthesizing unit tests (Chen et al., 2023b; Hassid et al., 2024; Kapoor et al., 2024; Cook et al., 2024), commonly used in code generation tasks, necessitates verifiers capable of generating natural language. Broadly, generative verifiers are referred to as either critique models or LLM-as-judge models. In both cases, LLMs are either prompted or fine-tuned specifically for critique and evaluation. These models have been employed not only for output reranking (Vu et al., 2024) but also for providing valuable natural language feedback (Shinn et al., 2024; Shridhar et al., 2024; McAleese et al., 2024). However, recent studies have found that LLM-as-judge models generally underperform reward models (RMs) in terms of verification (Zhang et al., 2024e). To address this, researchers have sought to combine the strengths of both approaches under the Generative RM framework (Zhang et al., 2024e; Mahan et al., 2024; Liu et al., 2025b), aiming to unify the advantages of generative feedback with the precision of reward-based evaluation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 479, + 541, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 479, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 68, + 479, + 541, + 552 + ], + "type": "text", + "content": "Self-reflection or self-refinement approaches (Saunders et al., 2022; Madaan et al., 2024) aim to eliminate the need for additional, specialized verifier models by enabling the agent to critique and refine its own outputs. While some studies (Saunders et al., 2022; Madaan et al., 2024) have demonstrated empirical success, others highlight poor performance in the absence of robust verifiers (Stechly et al., 2023; Huang et al., 2024a; Stechly et al., 2024; Valmeekam et al., 2023; Shridhar et al., 2024). For a comprehensive review of recent advancements, see (Pan et al., 2024b)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 556, + 540, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 556, + 540, + 605 + ], + "spans": [ + { + "bbox": [ + 68, + 556, + 540, + 605 + ], + "type": "text", + "content": "While verification methods can be deployed across a wider range of domains, they are susceptible to false positives—incorrect solutions that nevertheless pass verification. This limitation becomes particularly relevant when scaling up inference compute, as it can lead to diminishing returns on computational investment. Interested readers can refer to (Stroebl et al., 2024) for a comprehensive analysis of these trade-offs." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 616, + 346, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 346, + 628 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 346, + 628 + ], + "type": "text", + "content": "3.2.2 Enhancement through Retrieval and Tool Utilization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 637, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 637, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 68, + 637, + 541, + 733 + ], + "type": "text", + "content": "During the reasoning process, agents can retrieve external knowledge to refine their internal state representations, resulting in more accurate reasoning steps. The advantages of retrieval are particularly pronounced in knowledge-intensive tasks that demand multi-hop and long-horizon reasoning, where connecting multiple pieces of information is essential to arrive at a final answer. Through retrieval, agents can access intermediate information, verify connections between data points, and integrate them into their reasoning process (Shi et al., 2024; Jiang et al., 2024b; Wang et al., 2024m). Retrieval also addresses critical flaws in LLMs, such as hallucination and factual inaccuracies. By grounding responses in retrieved facts, models are less prone to generating erroneous information and more likely to produce reliable and trustworthy outputs. 
For" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 73, + 80, + 538, + 135 + ], + "blocks": [ + { + "bbox": [ + 73, + 80, + 538, + 135 + ], + "lines": [ + { + "bbox": [ + 73, + 80, + 538, + 135 + ], + "spans": [ + { + "bbox": [ + 73, + 80, + 538, + 135 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
DesigningDecentralizedNo hierarchy among agentsChen et al. (2023c); Chang (2024)
CommunicationCentralizedPresence of a central lead agentSuzgun & Kalai (2024a); Pan et al. (2024a)
ActionConditioned generationPerform reasoning based on other agents' outputsWang et al. (2024c); Gao et al. (2024b)
CoordinationDynamic adaptationAdapt actions based on specific tasksFourney et al. (2024); Yuan et al. (2024c)
", + "image_path": "acc497e70ddf56ff5155272a2c39df1404b99cdd1fd1aff432306a10918635ad.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 167, + 142, + 442, + 155 + ], + "lines": [ + { + "bbox": [ + 167, + 142, + 442, + 155 + ], + "spans": [ + { + "bbox": [ + 167, + 142, + 442, + 155 + ], + "type": "text", + "content": "Table 4: Summary of inference scaling in multi-agent systems." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 176, + 541, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 176, + 541, + 297 + ], + "spans": [ + { + "bbox": [ + 67, + 176, + 541, + 297 + ], + "type": "text", + "content": "instance, frameworks such as Verify-and-Edit (Zhao et al., 2023) and Chain-of-Knowledge (Li et al., 2024e) dynamically incorporate structured and unstructured knowledge sources to revise and correct intermediate reasoning steps within a reasoning chain. CRP-RAG (Xu et al., 2024b) improves multi-hop reasoning by dynamically adjusting reasoning paths and aggregating relevant knowledge. SelfRewardRAG (Hammane et al., 2024) enhances medical reasoning by combining RAG with self-evaluation, dynamically retrieving and synthesizing up-to-date medical information to ensure accurate response generation. By leveraging real-time data, such as clinical records from PubMed, it ensures responses are both current and precise. Another example is Think-on-Graph (Sun et al., 2023), a retrieval framework that integrates knowledge graphs (KGs) and text retrieval to deepen and refine reasoning in LLMs. GRATR (Zhu et al., 2024b) applies RAG techniques to enhance reasoning in multiplayer games with incomplete information." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 302, + 541, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 302, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 67, + 302, + 541, + 387 + ], + "type": "text", + "content": "In addition to search and retrieval, agents can utilize other specialized tools to overcome their inherent limitations and significantly enhance reasoning performance. By integrating tools such as calculators, compilers, calendars, or specialized APIs, agents can access domain-specific resources, enabling them to operate more effectively in targeted applications (Yu et al., 2023b; Lu et al., 2024a; Li et al., 2025a). For instance, SCIAGENT (Ma et al., 2024b) leverages domain-specific tools like SymPy and WolframAlpha to enhance the reasoning capabilities of LLMs in scientific domains. Similarly, FinAgent (Zhang et al., 2024g) combines textual, numerical, and visual tools to improve performance in financial trading tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 392, + 541, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 392, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 392, + 541, + 464 + ], + "type": "text", + "content": "Moreover, external tools provide precise computational capabilities, allowing LLMs to transcend their limitations and perform complex numerical tasks with higher accuracy (Chen et al., 2023e; Li et al., 2023a). For example, MATHSENSEI (Das et al., 2024) employs tools such as Python, WolframAlpha, and Bing Search to tackle mathematical reasoning tasks across disciplines like algebra and calculus. TART (Lu et al., 2024b) integrates LLMs with tools for precise table-based reasoning tasks, such as table question answering and fact verification." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 469, + 541, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 469, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 67, + 469, + 541, + 555 + ], + "type": "text", + "content": "Moreover, Anthropic introduced an open standard of Model Context Protocol (MCP) to seamlessly connect AI assistants with real-world data sources such as content repositories, business tools, and development environments. It provides a universal, scalable way for developers to create secure, two-way connections between AI tools and diverse data systems. While MCP holds significant promise, its adoption also introduces several challenges that must be addressed to support sustainable growth and responsible development. Hou et al. (2025) discussed some key issues, such as the absence of centralized security oversight, gaps in authentication and authorization, and difficulties in maintaining consistency across multi-step, cross-system workflows." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 568, + 299, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 568, + 299, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 568, + 299, + 582 + ], + "type": "text", + "content": "3.3 Inference Scaling With Multi-agent Systems" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 590, + 541, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 541, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 541, + 651 + ], + "type": "text", + "content": "By strategically designing communication patterns and coordinating actions, multi-agent systems can achieve more sophisticated reasoning by harnessing the specialized capabilities of multiple agents (Guo et al., 2024b). 
Effective communication design involves establishing structured message exchanges and interaction patterns among agents, while action coordination focuses on reconciling diverse outputs and achieving consensus to determine the final action in the environment." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 664, + 268, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 664, + 268, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 664, + 268, + 677 + ], + "type": "text", + "content": "3.3.1 Designing Communication Patterns" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "type": "text", + "content": "A common communication pattern in multi-agent frameworks involves engaging multiple agents in debates or discussions (Liang et al., 2023b). For instance, the RECONCILE framework (Chen et al., 2023c) requires each agent to generate an answer accompanied by an explanation and a confidence score. The agents then participate in multi-round discussions to refine their responses, and a confidence-weighted voting mechanism" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 131 + ], + "type": "text", + "content": "aggregates the answers into a consensus. 
Similarly, SocraSynth (Chang, 2024) employs opposing LLM agents moderated by predefined contentiousness levels to explore diverse perspectives. Additionally, GroupDebate (Liu et al., 2024e) organizes agents into groups that conduct internal debates before sharing their results, reducing token costs while maintaining robust logical reasoning capabilities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 136, + 541, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 136, + 541, + 209 + ], + "spans": [ + { + "bbox": [ + 67, + 136, + 541, + 209 + ], + "type": "text", + "content": "Besides decentralized communication, prior works also consider sending messages to a central node for decision making. For example, Suzgun & Kalai (2024b) employs a language model as a multi-faceted conductor that is good at handling and integrating various queries. Moreover, AgentCood (Pan et al., 2024a) assigns an LLM the role of a central planner for coordination strategy generation and agent assignment. Compared with decentralized communication, it can lead to more efficient resource allocation but increase the system vulnerability to potential failure of the central node." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 220, + 198, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 220, + 198, + 232 + ], + "spans": [ + { + "bbox": [ + 69, + 220, + 198, + 232 + ], + "type": "text", + "content": "3.3.2 Coordinating Action" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 239, + 541, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 239, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 239, + 541, + 289 + ], + "type": "text", + "content": "Effective action coordination among multiple agents is important for achieving the shared goals, especially given a dynamic and complex environment. 
Prior works explore various strategies which can enable agents to synergise agents' actions and optimize overall system reasoning and problem-solving performance. This approach leverages the strengths of different LLMs to overcome the limitations of individual models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 294, + 541, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 294, + 541, + 390 + ], + "spans": [ + { + "bbox": [ + 67, + 294, + 541, + 390 + ], + "type": "text", + "content": "One straightforward coordination strategy is chaining agents in a row, where agents can perform reasoning based on other agents' outputs. For example, Mixture-of-Agents (MoA) (Wang et al., 2024c) capitalizes on the cooperative nature of LLMs, allowing models to generate higher-quality responses by integrating and synthesizing contributions from multiple agents, achieving state-of-the-art performance. Similarly, Meta-Reasoning Prompting (MRP) (Gao et al., 2024b) assigns each agent to dynamically select the most effective reasoning method from a reasoning pool for a specific task, enabling the integration of diverse strategies to efficiently address multiple tasks. In addition, CoMM (Chen et al., 2024c) makes agents respond to discussions based on different role-playings." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 396, + 541, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 541, + 468 + ], + "type": "text", + "content": "Moreover, coordination action can incorporate dynamic adaptation to task requirements. For example, Magentic-One (Fourney et al., 2024) introduces a lead agent as Orchestrator to conduct dynamic planning based on varied tasks. Gabriel et al. (2024) proposes a framework that deals with multi-hop queries, produces and executes task graphs, chooses suitable tools, and dynamically adapts to real-time changes. 
Additionally, EVOAGENT (Yuan et al., 2024c) dynamically generates various agents suitable for the given task and select those with high-quality outputs for result generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 482, + 203, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 482, + 203, + 497 + ], + "spans": [ + { + "bbox": [ + 69, + 482, + 203, + 497 + ], + "type": "text", + "content": "4 Learning Algorithms" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 508, + 541, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 508, + 541, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 508, + 541, + 568 + ], + "type": "text", + "content": "Before delving into methodologies for training reasoning models, we first describe the foundational learning algorithms used to train the reasoner's policy and verifiers. These algorithms are defined by their precise loss functions. Note that learning algorithms are independent of the data curation process, which will be discussed in detail in Section 5. We begin by presenting commonly used learning algorithms for training reasoning models in Section 4.1, followed by a discussion on training verifiers in Section 4.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 581, + 194, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 581, + 194, + 594 + ], + "spans": [ + { + "bbox": [ + 69, + 581, + 194, + 594 + ], + "type": "text", + "content": "4.1 Learning of Reasoner" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 602, + 541, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 602, + 541, + 628 + ], + "spans": [ + { + "bbox": [ + 67, + 602, + 541, + 628 + ], + "type": "text", + "content": "This section is organized into three key parts: (1) imitation learning through supervised fine-tuning, (2) reinforcement learning, and (3) preference learning." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 638, + 306, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 638, + 306, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 638, + 306, + 651 + ], + "type": "text", + "content": "4.1.1 Imitation Learning - Supervised Fine-tuning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "content": "Supervised fine-tuning (SFT) maximizes the log probabilities of the next token " + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "content": " given the input prompt " + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "content": " and previously generated tokens " + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "inline_equation", + "content": "y_{< i}" + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "content": ". 
Training the policy model " + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 658, + 541, + 696 + ], + "type": "text", + "content": " generally includes the steps to minimize the following loss function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 199, + 702, + 541, + 736 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 702, + 541, + 736 + ], + "spans": [ + { + "bbox": [ + 199, + 702, + 541, + 736 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {S F T}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ \\sum_ {i} ^ {T} - \\frac {1}{T} \\log \\left(\\pi_ {\\theta} \\left(y _ {i} \\mid y _ {< i}, x\\right)\\right) \\right], \\tag {2}", + "image_path": "0393d3443e276749833bf066a6b2bb5413d444ecafbcb3ac62400658a0872798.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 761 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "text", + "content": " is the SFT dataset that comprises inputs " + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "text", + "content": " and ground truth 
labels " + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 82, + 541, + 156 + ], + "type": "text", + "content": ". The ground truth labels can be either human-written or AI-generated reasoning process and answer response. The loss is equivalent to the next token prediction objective where the prompt input tokens are masked out and do not contribute to the loss. SFT is the often the default first (or only) step to train a base LLM to produce reasoning chains in zero-shot settings. SFT has also popularly used as an effective way to train smaller LLMs to imitate outputs generated by larger, more powerful LLMs, in a process known as knowledge distillation (Xu et al., 2024c)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 166, + 282, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 166, + 282, + 178 + ], + "spans": [ + { + "bbox": [ + 68, + 166, + 282, + 178 + ], + "type": "text", + "content": "4.1.2 Reinforcement Learning for Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 186, + 541, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 186, + 541, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 186, + 541, + 248 + ], + "type": "text", + "content": "Stiannon et al. (2020) and Ouyang et al. (2022) pioneered the application of reinforcement learning (RL), particularly proximal policy optimization (PPO) (Schulman et al., 2017), to improve not only reasoning capabilities but also the helpfulness and harmlessness of LLMs. Their work catalyzed a wave of innovations in preference learning and RL-based optimization techniques, as evidenced by subsequent studies (Rafailov et al., 2023; Ahmadian et al., 2024; OpenAI et al., 2024; DeepSeek-AI et al., 2025; Ramesh et al., 2024)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 258, + 541, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 258, + 541, + 283 + ], + "spans": [ + { + "bbox": [ + 68, + 258, + 541, + 283 + ], + "type": "text", + "content": "Markov decision process. Most reinforcement learning (RL) approaches model text generation as a Markov Decision Process (MDP). In this framework, the process is defined by the following components:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 296, + 444, + 385 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 71, + 296, + 160, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 296, + 160, + 307 + ], + "spans": [ + { + "bbox": [ + 71, + 296, + 160, + 307 + ], + "type": "text", + "content": "A set of states " + }, + { + "bbox": [ + 71, + 296, + 160, + 307 + ], + "type": "inline_equation", + "content": "S" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 316, + 167, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 316, + 167, + 327 + ], + "spans": [ + { + "bbox": [ + 71, + 316, + 167, + 327 + ], + "type": "text", + "content": "- A set of actions " + }, + { + "bbox": [ + 71, + 316, + 167, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 71, + 316, + 167, + 327 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 335, + 444, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 444, + 346 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 444, + 346 + ], + "type": "text", + "content": "- A state-action transition distribution " + }, + { + "bbox": [ + 70, + 335, + 444, + 346 + ], + "type": "inline_equation", + "content": "P(s_{t + 1}|s_t,a_t)" + }, + { + "bbox": [ + 70, + 335, + 444, + 346 + ], + "type": "text", + "content": " controlled by the environment," + } + ] + } + ], + 
"index": 6 + }, + { + "bbox": [ + 70, + 354, + 376, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 354, + 376, + 365 + ], + "spans": [ + { + "bbox": [ + 70, + 354, + 376, + 365 + ], + "type": "text", + "content": "- A reward function " + }, + { + "bbox": [ + 70, + 354, + 376, + 365 + ], + "type": "inline_equation", + "content": "R(s_{t},a_{t})\\in \\mathbb{R}" + }, + { + "bbox": [ + 70, + 354, + 376, + 365 + ], + "type": "text", + "content": " that provides a scalar reward, and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 374, + 444, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 374, + 444, + 385 + ], + "spans": [ + { + "bbox": [ + 70, + 374, + 444, + 385 + ], + "type": "text", + "content": "- A policy " + }, + { + "bbox": [ + 70, + 374, + 444, + 385 + ], + "type": "inline_equation", + "content": "\\pi (a_t|s_t)" + }, + { + "bbox": [ + 70, + 374, + 444, + 385 + ], + "type": "text", + "content": ", which determines the actions to take based on the current state." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "spans": [ + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": "At each time step " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": ", for a given state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_t \\in S" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": ", the agent selects an action " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " and transitions to a new state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_{t+1}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": ", receiving a reward " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "R(s_t, a_t)" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " from the environment. 
The set of available actions at state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " may be restricted to a subset of " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": ", denoted " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{s_t}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "a_t \\in \\mathcal{A}_{s_t}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": "). In the context of autoregressive language modeling with LLMs, generally the next token depends on all the previous tokens. As such, in order to apply RL training for LLMs, one needs to define the states and actions of the problem such that they both satisfy the temporal dependency constraint of the language modeling task as well as the Markov property. One common approach is to define that the current state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " fully encapsulates all relevant information about the environment, in other words all previous tokens. 
This means the next state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_{t+1}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " depends solely on the current state " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "s_t \\in S" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": " and the chosen action " + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "inline_equation", + "content": "a_t \\in \\mathcal{A}_{s_t}" + }, + { + "bbox": [ + 67, + 400, + 541, + 545 + ], + "type": "text", + "content": ". In this way, the current state no longer needs to retrieve information from the previous states to decide the next action. As such, the state transition is agnostic to the history or previous states and actions. Within this MDP framework, the goal of RL is to learn a policy model that selects optimal actions by maximizing the expected cumulative rewards (Eq. 1)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 557, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "spans": [ + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": "- Action := token: Actions are defined at the token level, making the action space " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{s_t}" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": " is finite and equal in size to the vocabulary. 
The state " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": " consists of all preceding tokens, including the input prompt and previously generated output tokens. The next state " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "s_{t+1}" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": " is defined as the concatenation of the current state " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": " and the action taken " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "inline_equation", + "content": "s_{t+1} \\coloneqq [s_t; a_t]" + }, + { + "bbox": [ + 70, + 557, + 541, + 641 + ], + "type": "text", + "content": ". This category of methods defines rewards and related measures, such as values and advantages, at the token level. Works adopting this approach include most standard RLHF methods (Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023) as well as more recent fine-grained process-rewarding approaches (Yuan et al., 2024b; Cui et al., 2025)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "text", + "content": "- **Action := token chunk (step):** In this category of methods, actions are defined at the level of token chunks that semantically represent a reasoning step, separated by a special delimiter. 
As a result, the action space is infinite. The state " + }, + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "text", + "content": " consists of the prompt and the output tokens generated in previous reasoning steps. Rewards, value scores, and advantages are computed at the step level, with all tokens within a reasoning step " + }, + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 70, + 648, + 541, + 733 + ], + "type": "text", + "content": " sharing the same step-level score. This approach is particularly prominent in process supervision pipelines, as exemplified by DeepSeek-Math and VinePPO (Shao et al., 2024; Kazemnejad et al., 2024)." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 82, + 79, + 530, + 243 + ], + "blocks": [ + { + "bbox": [ + 82, + 79, + 530, + 243 + ], + "lines": [ + { + "bbox": [ + 82, + 79, + 530, + 243 + ], + "spans": [ + { + "bbox": [ + 82, + 79, + 530, + 243 + ], + "type": "table", + "html": "
TypeState stAction atAction spaceExample work
Action := tokenAll previous to-kens (prompt and current response tokens)one tokenfinite, vocabulary size(Ouyang et al., 2022; Zheng et al., 2023b; Lee et al., 2023)
Action := stepAll previous tokens of prompt and previous stepsa chunk of tokens representing a “reasoning step”, separated by a special delimiterinfinite(Shao et al., 2024) (process supervision), (Kazemnejad et al., 2024)
Action := full re-sponsePromptentire responseinfinite(Shao et al., 2024) (outcome supervision), (DeepSeek-AI et al., 2025)
", + "image_path": "d2d3ae2651fba2b47af954ed5cd41fbafa1fee7fb129a2b951985ad6c1721ac0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 126, + 251, + 483, + 263 + ], + "lines": [ + { + "bbox": [ + 126, + 251, + 483, + 263 + ], + "spans": [ + { + "bbox": [ + 126, + 251, + 483, + 263 + ], + "type": "text", + "content": "Table 5: Definitions of MDP states and actions across different training schemes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 70, + 285, + 541, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 285, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 70, + 285, + 541, + 430 + ], + "type": "text", + "content": "- Action := full response: In this category, the entire response—comprising all output tokens—is treated as a single action. This transforms the reasoning problem into a one-step MDP with an infinite action space. This approach has been recently popularized by DeepSeek-R1 (DeepSeek-AI et al., 2025) and previously by DeepSeek-Math (outcome supervision) (Shao et al., 2024). A unique aspect of this formulation is that the full response may semantically include multiple reasoning steps, such as spontaneous backtracking and self-evaluation behaviors, as observed in DeepSeek-R1 (DeepSeek-AI et al., 2025). Regardless of the number of humanly recognizable reasoning steps within the response, the entire output is still considered a single action. To assign token-level value scores, rewards, and advantages, Shao et al. (2024); DeepSeek-AI et al. (2025) compute these values based on the full response " + }, + { + "bbox": [ + 70, + 285, + 541, + 430 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 70, + 285, + 541, + 430 + ], + "type": "text", + "content": " and then distribute them uniformly across all tokens, similar to the step-level action setting. 
This formulation aligns with the concept of \"bandit\" prediction (with infinite action space) in REINFORCE-style RL (Nguyen et al., 2017; Kreutzer et al., 2017)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": "Proximal Policy Optimization (PPO). As one of the primary variants of policy gradient methods, PPO has remained a popular and widely used RL algorithm (Schulman et al., 2017). To train the policy " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ", PPO utilizes two additional models: the reference model " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{\\mathrm{ref}}}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ", which represents the initial state of the policy, and the value model " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ", which estimates the state value " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "V(s_{t})" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ". 
PPO begins by sampling a state-action trajectory " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": " with consecutive state-action pairs " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "s_{t+1} \\sim (s_{t}, a_{t})" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ", then collects the respective intermediate or process reward (if available) and final (outcome) reward. Then, it computes the advantage " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "A(s_{t}, a_{t})" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": " of each action " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": " given the current state " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ", which is defined as the relative strength of that specific action " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": " compared to the probability-weighted actions that the policy could probably have taken from " + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 67, + 446, + 541, + 555 + ], + "type": "text", + "content": ". 
The advantage is formulated as" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 177, + 564, + 541, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 564, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 177, + 564, + 541, + 578 + ], + "type": "interline_equation", + "content": "A \\left(s _ {t}, a _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - V \\left(s _ {t}\\right) := Q \\left(s _ {t}, a _ {t}\\right) - \\mathbb {E} _ {a _ {t} ^ {\\prime}} \\left[ Q \\left(s _ {t}, a _ {t} ^ {\\prime}\\right) \\right], \\tag {3}", + "image_path": "43a26766ab3cc5994e2bc151322aa147605a9f885631b6df81f8c6dd61cc72f8.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "Q(s_{t},a_{t})" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " represents the expected cumulative total reward that the policy is expected to obtain if it takes action " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " and continue to follow the current policy, while " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "V(s_{t})" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " denotes the expected total rewards obtainable from state " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": 
"inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": ", known as the state value. The state value is equivalent to the expected value of " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "Q(s_{t},a_{t}^{\\prime})" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " marginalized over all probable actions the current policy " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " may take from " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "A(s_{t},a_{t}) > 0" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": ", the action " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " is encouraged, conversely, if " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "A(s_{t},a_{t}) < 0" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": ", the action " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " is discouraged. 
After computing the advantages, PPO optimizes the policy " + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 588, + 541, + 660 + ], + "type": "text", + "content": " according to the following loss function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 82, + 670, + 541, + 703 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 670, + 541, + 703 + ], + "spans": [ + { + "bbox": [ + 82, + 670, + 541, + 703 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {P P O}} (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta_ {0}}, P} - \\frac {1}{T} \\left[ \\sum_ {t = 0} ^ {T} \\operatorname {m i n} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})} A (s _ {t}, a _ {t}), \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (a _ {t} | s _ {t})}{\\pi_ {\\theta_ {o}} (a _ {t} | s _ {t})}, 1 - \\epsilon , 1 + \\epsilon\\right) A (s _ {t}, a _ {t})\\right) \\right], \\tag {4}", + "image_path": "6d53eb6e4bfec2bd5f35a0eaeafe8115a0a508071d9e7276eeddfecbecd8c818.jpg" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 69, + 712, + 541, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 712, + 541, + 731 + ], + "spans": [ + { + "bbox": [ + 69, + 712, + 541, + 731 + ], + "type": "text", + "content": "5The O-1 model series (OpenAI et al., 2024) also exhibit such behaviors, though the training approach for O-1 remains undisclosed." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "t \\in [0, T]" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is a time step within trajectory " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_o}" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is the fixed policy from previous episode or iteration, and " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is the transition distribution. The clip function, applied to the probability ratio " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\frac{\\pi_{\\theta}(a_t|s_t)}{\\pi_{\\theta_o}(a_t|s_t)}" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": ", ensures that the policy does not deviate too drastically or rapidly from its previous version. This also helps prevent catastrophic failure or suboptimal local solutions. 
Additionally, a KL divergence term " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta}||\\pi_{\\theta_{\\mathrm{ref}}})" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is often incorporated into the loss function to constrain exploration during the later stages of training. " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{\\mathrm{ref}}}" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is often a fixed initial reference policy that we do not want our policy to deviate too much from, while " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_o}" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " is a snapshot of the current policy from the previous iteration which is updated regularly. Throughout the training process, both the policy " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " and value model " + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 67, + 82, + 542, + 184 + ], + "type": "text", + "content": " are iteratively updated." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 193, + 542, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 193, + 542, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 193, + 542, + 232 + ], + "type": "text", + "content": "REINFORCE & RLOO. REINFORCE is another popular policy gradient method (Sutton, 2018; Williams, 1992; Nguyen et al., 2017; Kreutzer et al., 2017) for RL. 
This method seeks to optimize the reward weighted objective of the entire response as:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 170, + 236, + 541, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 236, + 541, + 251 + ], + "spans": [ + { + "bbox": [ + 170, + 236, + 541, + 251 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {R E I N F O R C E}} (\\theta) = \\mathbb {E} _ {x \\sim \\mathcal {D}, y \\sim \\pi_ {\\theta} (\\cdot | x)} [ (R (y, x) - b) \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y | x) ] \\qquad (5)", + "image_path": "39de4b7e43614dd9fd6b20b5ea17613653be0964114d78d15e240a8be4b40062.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "spans": [ + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "inline_equation", + "content": "R(y, x)" + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": " represents the final reward for output " + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": " given input " + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": " is a baseline term introduced to reduce the variance of the gradient estimates. 
A widely used choice for " + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 67, + 256, + 542, + 294 + ], + "type": "text", + "content": " is the moving average of all rewards observed during training (Williams, 1992; Ahmadian et al., 2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 298, + 542, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 298, + 542, + 335 + ], + "spans": [ + { + "bbox": [ + 67, + 298, + 542, + 335 + ], + "type": "text", + "content": "Recently, the REINFORCE Leave-One-Out (RLOO) method (Kool et al., 2019; Ahmadian et al., 2024) has been proposed, which replaces the traditional baseline calculation with the leave-one-out average of trajectory rewards obtained through Monte Carlo (MC) sampling, as shown in Eq. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 165, + 343, + 542, + 377 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 343, + 542, + 377 + ], + "spans": [ + { + "bbox": [ + 165, + 343, + 542, + 377 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {R L O O}} (\\theta) = \\frac {1}{k} \\sum_ {i = 1} ^ {k} [ R (y _ {i}, x) - \\frac {1}{k - 1} \\sum_ {j \\neq i} R (y _ {j}, x) ] \\nabla_ {\\pi_ {\\theta}} \\log \\pi_ {\\theta} (y _ {i} | x) \\tag {6}", + "image_path": "b7ccdc1523c5cda97395d8e1c071124159cd2ad9b45eecf6b0cf01c435760963.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 382, + 544, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 382, + 544, + 431 + ], + "spans": [ + { + "bbox": [ + 67, + 382, + 544, + 431 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 382, + 544, + 431 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 382, + 544, + 431 + ], + "type": "text", + "content": " denotes the number of Monte Carlo samples. 
Unlike PPO, these algorithms do not rely on a parameterized value function (critic model) and instead depend solely on observed rewards. These methods share similarities with approaches such as Group-Relative Policy Optimization (GRPO) (Ramesh et al., 2024) and VinePPO (Kazemnejad et al., 2024), which will be discussed in detail below." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "spans": [ + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": "Group-Relative Policy Optimization (GRPO). This algorithm has gained recent popularity through DeepSeek-R1 DeepSeek-AI et al. (2025), though it was also explored in earlier studies such as (Shao et al., 2024; Yang et al., 2024b;a; Team, 2024). It employs the same clipped surrogate objective as PPO, defined in Eq. 4 (Schulman et al., 2017). However, unlike PPO, which uses a parameterized value model to estimate the advantage " + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "inline_equation", + "content": "A(s_{t},a_{t})" + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": ", this approach samples a group " + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "inline_equation", + "content": "G = [o_{1},o_{2},\\dots,o_{g}]" + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": " of Monte-Carlo outputs for a given input " + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": ". 
It then computes the corresponding rewards " + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "inline_equation", + "content": "R = [r_1,r_2,\\dots,r_g]" + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": ", and determines the advantage of each output " + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 67, + 442, + 543, + 529 + ], + "type": "text", + "content": " as the group-normalized reward" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 200, + 533, + 541, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 533, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 200, + 533, + 541, + 559 + ], + "type": "interline_equation", + "content": "A _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}) = A _ {\\mathrm {G R P O}} (o _ {i}) = \\frac {r _ {i} - m e a n (R)}{s t d (R)}. \\tag {7}", + "image_path": "4aa0c61186f0c779a1bd0502eb36815676a9150805a28fd149dc247fb353c742.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 564, + 451, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 564, + 451, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 564, + 451, + 578 + ], + "type": "text", + "content": "Then, the algorithm optimizes the policy " + }, + { + "bbox": [ + 69, + 564, + 451, + 578 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 69, + 564, + 451, + 578 + ], + "type": "text", + "content": " by minimizing the following loss function." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 122, + 584, + 541, + 647 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 584, + 541, + 647 + ], + "spans": [ + { + "bbox": [ + 122, + 584, + 541, + 647 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} L _ {\\mathrm {G R P O}} (\\theta) = - \\frac {1}{| G |} \\sum_ {i} ^ {| G |} \\frac {1}{T _ {i}} \\sum_ {t} ^ {T _ {i}} m i n \\left\\{\\frac {\\pi_ {\\theta} (a _ {i , t} | s _ {i , t})}{\\pi_ {\\theta_ {o}} (a _ {i , t} | s _ {i , t})} A _ {\\mathrm {G R P O}} (s _ {i, t}, a _ {i, t}), \\right. \\\\ \\left. \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}{\\pi_ {\\theta_ {o}} \\left(a _ {i , t} \\mid s _ {i , t}\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {\\mathrm {G R P O}} \\left(s _ {i, t}, a _ {i, t}\\right) \\right\\} \\tag {8} \\\\ \\end{array}", + "image_path": "1e801304742ff47f8641315d48562cda363d39fe3b0246405be5a97501fa27ce.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 651, + 542, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 542, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 542, + 677 + ], + "type": "text", + "content": "Variants of GRPO, such as DAPO (Yu et al., 2025), have also been introduced to alleviate issues with GRPO like length bias and inappropriate penalties for responses that exceed the context length." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 688, + 198, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 688, + 198, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 688, + 198, + 700 + ], + "type": "text", + "content": "4.1.3 Preference Learning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 708, + 542, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 542, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 542, + 734 + ], + "type": "text", + "content": "Preference learning, particularly learning from human feedback, is a widely used post-pretraining alignment stage for LLMs. Its goal is to encourage the generation of responses that align with human preferences or" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 166 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 166 + ], + "type": "text", + "content": "desired values, such as helpfulness or harmlessness (Ouyang et al., 2022; Bai et al., 2022; Ganguli et al., 2022). The data collection process for this stage typically involves prompting an unaligned LLM to generate multiple responses for a given input. Human annotators are then presented with pairs of responses and asked to select the preferred one. The resulting preference dataset is used to train a reward model. 
This reward model subsequently provides online reward scores for policy trajectories during PPO training, a process commonly referred to as reinforcement learning from human feedback or RLHF (Schulman et al., 2017; Ouyang et al., 2022; Touvron et al., 2023), as well as AI feedback (Lee et al., 2023)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "spans": [ + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "text", + "content": "Preference learning has evolved beyond conventional reinforcement learning (RL)-based methodologies with the introduction of Direct Preference Optimization (DPO) (Rafailov et al., 2023) and its subsequent variants (Ethayarajh et al., 2024; Lai et al., 2024; Hong et al., 2024; Saeidi et al., 2024; Meng et al., 2024; Azar et al., 2024). DPO proposes using the policy language model itself to directly model human reward preferences from the preference dataset. This formulation eliminates the need for a separately trained reward model, instead optimizing the policy on the preference dataset with a simple binary classification loss. 
Formally, the policy " + }, + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "text", + "content": " is optimized using a preference dataset " + }, + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 67, + 171, + 541, + 256 + ], + "type": "text", + "content": " by minimizing the loss function:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 150, + 259, + 541, + 287 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 259, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 150, + 259, + 541, + 287 + ], + "type": "interline_equation", + "content": "L _ {\\mathrm {D P O}} (\\theta) = - \\mathbb {E} _ {(x, y _ {w}, y _ {l}) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} (y _ {w} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {w} | x)} - \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} | x)}{\\pi_ {\\mathrm {r e f}} (y _ {l} | x)}\\right) \\right], \\tag {9}", + "image_path": "a33ad402bcef5028d88de2930c410286783822bc4e894664f1cd2dfc4b19da46.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "inline_equation", + "content": "y_{w}" + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "text", + "content": " represent the winning (chosen) and losing (rejected) outputs for input " + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + 
"type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 290, + 541, + 373 + ], + "type": "text", + "content": ", respectively. DPO has gained popularity due to its simplicity and stability, bypassing the engineering complexity and challenges associated with PPO-based techniques. However, DPO is not without limitations, such as implicit biases toward longer responses and performance degradation over extended training periods (Ethayarajh et al., 2024; Meng et al., 2024). Subsequent advancements, including KTO (Ethayarajh et al., 2024), iPO (Azar et al., 2024), SimPO (Meng et al., 2024), ORPO (Hong et al., 2024), Step-DPO (Lai et al., 2024), and combination methods (Saeidi et al., 2024), have addressed many of these shortcomings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": "While the above learning algorithms are formulated for single turn input-to-output tasks, it is also generalizable to multi-turn conversations as well as function-calling agentic workflows. 
In such scenarios, the next state " + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "inline_equation", + "content": "s_{t+1}" + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": " may not always be a concatenation of all previous states " + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "inline_equation", + "content": "s_{\\leq t}" + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": " and actions " + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "inline_equation", + "content": "a_{\\leq t}" + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": ", but it also depends on incoming response " + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "inline_equation", + "content": "h_t" + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": " from an outside environment, which can come from a follow-up user instruction or the returned result from a function call. In other words, one may define " + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "inline_equation", + "content": "s_{t+1} := [s_t; a_t; h_t]" + }, + { + "bbox": [ + 67, + 380, + 541, + 441 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 453, + 284, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 453, + 284, + 464 + ], + "spans": [ + { + "bbox": [ + 67, + 453, + 284, + 464 + ], + "type": "text", + "content": "4.2 Learning of Verifiers and Reward Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 474, + 541, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 474, + 541, + 617 + ], + "spans": [ + { + "bbox": [ + 67, + 474, + 541, + 617 + ], + "type": "text", + "content": "Verifiers play an important role in reasoning systems, improving performance both through training time credit assignment (Ouyang et al., 2022; Ziegler et al., 2019; Stiennon et al., 2020) and inference-time scaling verification (Snell et al., 2024). Reward modeling in the reasoning settings focuses on verifying the correctness of the reasoning chain, rather than evaluating using more general criteria, like helpfulness or safety (Ouyang et al., 2022). As a result, reward model training in reasoning is typically formulated as a binary classification problem between correct and incorrect reasoning steps. Based on label granularity, reward modeling is further categorized into outcome reward modeling (Section 4.2.1) and process reward modeling (Section 4.2.2). More recently, generative models for verification (Section 4.2.3) have emerged as a popular approach that produces actionable and explainable natural language feedback alongside rewards. In this section, we cover common training approaches for verifiers; In Section 6.1.3, we posit that verification itself may benefit from being studied as a reasoning problem itself, highlighting both concrete methods and recent analysis of failure modes in reasoning settings." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 628, + 258, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 628, + 258, + 641 + ], + "spans": [ + { + "bbox": [ + 67, + 628, + 258, + 641 + ], + "type": "text", + "content": "4.2.1 Outcome Reward Models (ORM)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "spans": [ + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": "The goal of outcome reward models (ORMs) for reasoning is to provide a scalar reward for a full trajectory. Given a dataset " + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": " of input prompt " + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": " and sampled outputs " + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": " with corresponding correctness label " + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "inline_equation", + "content": "c\\in \\{0,1\\}" + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": ", the goal of outcome reward modeling is to train the outcome reward model " + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "inline_equation", + "content": "r_{\\theta}" + }, + { + "bbox": [ + 67, + 649, + 541, + 685 + ], + "type": "text", + "content": " using the loss" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 690, + 539, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 690, + 539, + 704 
+ ], + "spans": [ + { + "bbox": [ + 156, + 690, + 539, + 704 + ], + "type": "interline_equation", + "content": "L _ {\\text {o r m}} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} [ c \\log \\sigma (r _ {\\theta} (x, y)) + (1 - c) \\log (1 - \\sigma (r _ {\\theta} (x, y))) ], \\tag {10}", + "image_path": "90bdd37917ff2f30ff386b7e3ffe255d3de4c023028d7e66579d66c8d88f6aae.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 708, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 541, + 733 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 708, + 541, + 733 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 708, + 541, + 733 + ], + "type": "text", + "content": " is the sigmoid function. Alternatively, one can train ORMs with a pairwise formulation. Here, the correctness labels are not explicitly encoded in the loss function, but are used to categorize multiple sampled" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": "outputs as correct or incorrect. 
From there, we can form pairs of outputs " + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "inline_equation", + "content": "\\{y_w, y_l\\}" + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "inline_equation", + "content": "y_w" + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": " reaches the correct outcome (e.g., correct answer for a math problem) and " + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "inline_equation", + "content": "y_l" + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": " reaches an incorrect outcome. The reward model " + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "inline_equation", + "content": "r_\\theta" + }, + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": " is then typically trained with the Bradley-Terry loss, similar to that in DPO training (Equation 9)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 187, + 129, + 541, + 149 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 129, + 541, + 149 + ], + "spans": [ + { + "bbox": [ + 187, + 129, + 541, + 149 + ], + "type": "interline_equation", + "content": "L _ {\\text {o r m}} (\\theta) = - \\mathbb {E} _ {x, y _ {w}, y _ {l} \\sim D} \\left[ \\log \\left(\\sigma \\left(r _ {\\theta} (x, y _ {w}) - r _ {\\theta} (x, y _ {l})\\right)\\right) \\right], \\tag {11}", + "image_path": "e8050e02c74b6b7f46934188f83a40262bec0f41217b4b9bef7d184af8a97f4e.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 67, + 157, + 541, + 194 + ], + "type": "text", + "content": "Many other pairwise loss functions can be employed, such as hinge loss or other margin-based losses, focal loss, or variations of the Bradley-Terry loss. However, recent work (Liu et al., 2024a) has categorized the impact of loss functions, finding that the typical Bradley-Terry loss yields the best-performing ORM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 205, + 251, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 205, + 251, + 219 + ], + "spans": [ + { + "bbox": [ + 69, + 205, + 251, + 219 + ], + "type": "text", + "content": "4.2.2 Process Reward Models (PRM)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "spans": [ + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "content": "While outcome reward models are relatively simple to train, outcome-driven verification may encourage incorrect reasoning chains that lead to the correct outcome. 
As such, recent work has sought to train process reward models (PRMs) to assess correctness for each step in the solution. This requires more fine-grained labels than ORM training. Specifically, assume that for an output " + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "inline_equation", + "content": "y = (a_{1},\\dots ,a_{T})" + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "content": ", we obtain process-level supervision of the form " + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "inline_equation", + "content": "c_{1},\\ldots ,c_{T}" + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "inline_equation", + "content": "c_{t}" + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "content": " is a binary indicator of step " + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 67, + 227, + 541, + 299 + ], + "type": "text", + "content": " correctness. Then, the step-wise cross-entropy loss below is applied." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 126, + 309, + 541, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 309, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 126, + 309, + 541, + 342 + ], + "type": "interline_equation", + "content": "L _ {p r m} (\\theta) = \\mathbb {E} _ {x, y \\sim \\mathcal {D}} \\left[ - \\frac {1}{T} \\sum_ {t = 1} ^ {T} \\left(c _ {t} \\log \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right) + \\left(1 - c _ {t}\\right) \\log \\sigma \\left(1 - \\sigma \\left(r _ {\\theta} \\left(x, y _ {\\leq t}\\right)\\right)\\right) \\right] \\right. 
\\tag {12}", + "image_path": "408f8e9fa6d813d4f1b1c775fc2d71b99bc1df299e3b8e20cfde96688a1b7412.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": "Above, " + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "inline_equation", + "content": "y_{\\leq t}" + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": " denotes the output prefix up to and including step " + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": ". In practice, collecting step-level annotations " + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "inline_equation", + "content": "c_t" + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": " can be extremely expensive. As a result, recent work has used variants of Monte Carlo Tree Search to automatically obtain said annotations. Specifically, the annotation for a reasoning step is obtained by rolling out the response until completion from the intermediate step, then using the outcome accuracy as a proxy for correctness (Wang et al., 2024g; Jiao et al., 2024a; Wang et al., 2024k; Dou et al., 2024a; Luo et al., 2024b; Setlur et al., 2024b). As a concrete example, suppose we roll out five completions randomly from the same prefix " + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "inline_equation", + "content": "y_{\\leq t}" + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": ", with three rollouts arriving at the correct answer. 
Then, the confidence that the prefix " + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "inline_equation", + "content": "y_{\\leq t}" + }, + { + "bbox": [ + 67, + 350, + 541, + 495 + ], + "type": "text", + "content": " is correct can be approximated as 0.6. These coarse signals can then be used to train a PRM. These two general approaches to constructing PRM training data have associated pros and cons: Collecting human annotations is expensive, but does not overfit PRM training to one particular policy. MCTS-based approaches yield annotations relatively quickly, but do not generalize beyond the policy from which samples are collected (Zheng et al., 2024; Setlur et al., 2024a)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 506, + 196, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 506, + 196, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 506, + 196, + 518 + ], + "type": "text", + "content": "4.2.3 Generative Verifiers" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 528, + 541, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 528, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 67, + 528, + 541, + 599 + ], + "type": "text", + "content": "ORMs and PRMs are discriminative verifiers, and are therefore unable to generate natural language to support their scores. However, natural language reasoning for evaluations is valuable both as actionable feedback and as an explainable mechanism. As a result, generative verifiers have been proposed to assess responses and provide natural language feedback. Generative verifiers have progressed from prompting frontier LLMs to evaluation-specific finetuning, relying on many of the same learning algorithms presented in Section 4.1. As such, the focus of this section is largely on training data curation." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "type": "text", + "content": "Finetuned generative verifiers Generative verifiers are broadly classified as critique models or LLM-as-judge models. Critique models typically take as input a question and model response, and produce a critique with actionable feedback in natural language. The foundation of critique model training is critique training data. To construct training data, intentionally incorrect outputs are sampled from a policy model. Then, these outputs are corrected, usually with stronger model or human annotations. Using such samples, past methods (Wang et al., 2023c; Xi et al., 2024) have employed SFT (Section 4.1.1) to train critique models to imitate critiques. Other methods (Yao et al., 2023c; McAleese et al., 2024) have used used the typical RLHF workflow (Section 4.1.3), first training a reward model to use during PPO training. More recently, outcome-based RL (e.g., GRPO, as presented in Section 4.1.2) has been used for training, relying on either hand-crafted rewards (Akyurek et al., 2023) or execution feedback for code critique (Xie et al., 2025)." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "type": "text", + "content": "LLM-as-judge models are a more general class of generative verifiers trained to evaluate model responses based on different protocols (pairwise evaluation, 1-5 rating, binary classification). These models rely on preference datasets, either annotated by a strong model or by humans. For example, to train a pairwise LLM-as-judge, one would collect a dataset of paired model responses for a given input prompt, then ask either a human or strong LLM to pick which response is better. Then, natural language explanations are distilled from stronger models, with distilled samples being categorized as correct or incorrect if the preference matches the annotation. From here, earlier LLM-as-judges (e.g., (Li et al., 2023b; Zheng et al., 2023a)) trained with SFT (Section 4.1.1), while newer approaches (Wang et al., 2024f; Hu et al., 2024) have used DPO (Section 4.1.3)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "content": "Discriminative-generative hybrid verifiers Because generation is a more difficult task than classification, generative verifiers have often lagged discriminative reward models in benchmark performance. 
Recent work (Zhang et al., 2024f; Mahan et al., 2024) has sought to unify the two under the Generative Reward Model umbrella. Here, models use similar datasets to those used to train LLM-as-judge models, but augment the SFT loss with an answer-token loss. Concretely, given a dataset " + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "content": " with samples comprised of an input " + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "content": ", model response " + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "content": ", and outcome label " + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 202, + 541, + 275 + ], + "type": "text", + "content": " (e.g., \"Yes\"/\"No\" for correctness), the loss" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 219, + 285, + 541, + 298 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 285, + 541, + 298 + ], + "spans": [ + { + "bbox": [ + 219, + 285, + 541, + 298 + ], + "type": "interline_equation", + "content": "L _ {G e n R M} (\\theta) = - \\mathbb {E} _ {x, y, c \\sim \\mathcal {D}} \\left[ \\log \\left(\\pi_ {\\theta} (c | x, y) \\right] \\right. 
\\tag {13}", + "image_path": "b9528171bce4bc3a7e670aa15ae19f22637e7db2734a43deaefcc1e7b1fdde5e.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 307, + 541, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 307, + 541, + 331 + ], + "spans": [ + { + "bbox": [ + 67, + 307, + 541, + 331 + ], + "type": "text", + "content": "is added to the typical language generation losses (e.g., SFT or DPO loss) that are used to train the model to produce natural language explanations. Here, " + }, + { + "bbox": [ + 67, + 307, + 541, + 331 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 307, + 541, + 331 + ], + "type": "text", + "content": " is the generative reward model being trained." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 347, + 203, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 347, + 203, + 361 + ], + "spans": [ + { + "bbox": [ + 69, + 347, + 203, + 361 + ], + "type": "text", + "content": "5 Learning to Reason" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": "In Section 3, we explored various methods for enhancing reasoning through inference-time computation. While these approaches have proven effective in many scenarios, they come with notable limitations, such as constrained improvements in reasoning capabilities (since model parameters remain unchanged) and the requirement for substantial computational resources during inference. With the advent of OpenAI o1 (OpenAI et al., 2024), there has been a growing emphasis on improving reasoning through training-time methods. 
Recently, Deepseek-R1 (DeepSeek-AI et al., 2025) demonstrated that training-time approaches can achieve reasoning improvements comparable to, or even surpassing, those of inference-scaling methods. Reflecting this trend, this section delves deeper into the role of training in advancing reasoning capabilities." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 475, + 541, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 475, + 541, + 571 + ], + "spans": [ + { + "bbox": [ + 67, + 475, + 541, + 571 + ], + "type": "text", + "content": "Specifically, we explore the data recipe, which focuses on constructing data (reasoning trajectories) tailored for reasoning tasks to facilitate training. At a high level, trajectory collection can be viewed as a form of simulation, where the generator produces reasoning steps—potentially incorporating calls and outputs from external tools—in response to either synthetic or real-world inputs. The primary challenge lies in ensuring that this simulation is both realistic and diverse while simultaneously providing meaningful supervision (reward) throughout the process. Depending on the architecture, as outlined in Section 2.3, this typically involves designing inputs (such as perception in single-agent systems or interaction in multi-agent systems) and outputs (such as actions in single-agent systems or coordination in multi-agent systems)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 577, + 541, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 541, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 541, + 661 + ], + "type": "text", + "content": "Furthermore, we explore the model recipe. Depending on the learning algorithms (Section 4), the model recipe can be 'offline' (non-RL, e.g., SFT and offline RL, e.g. DPO), which focuses on extracting supervision (reward) from the collected trajectories and leveraging them for training. 
It can also be 'online' (most of RL algorithms, e.g., GRPO and PPO), where there is no need to collect trajectories beforehand, but learning occurs directly on the questions and their rewards. Similar to Section 3, we start with standalone LLMs, detailing how each of their components is trained (Section 5.1). Building on this foundation, we expand the discussion to single-agent systems (Section 5.2) and multi-agent systems (Section 5.3)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 674, + 288, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 674, + 288, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 674, + 288, + 687 + ], + "type": "text", + "content": "5.1 Learning to Reason with Standalone LLM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "type": "text", + "content": "This section examines how standalone LLMs can be trained for reasoning tasks. For 'offline' methods, the process typically involves collecting reasoning trajectories, that lead to both correct and incorrect outcomes, followed by further training the LLM on these trajectories. 
In contrast, for 'online' methods, learning occurs" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 167 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 167 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 167 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 167 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
Constructing PromptsQuestion AugmentationExpand knowledge depth and breadth of seed questionsLuo et al. (2023b); Yu et al. (2024c)
Graph-based SynthesisSynthesize prompts guided by structured taxonomyLi et al. (2024a); Tang et al. (2024)
Collecting TrajectoriesRejection SamplingFilter low-quality trajectories from current policyDong et al. (2023)
Special Reasoning PatternImitate human-like reasoning behaviorYuan et al. (2024a); Qin et al. (2024)
Reasoning DistillationDistill reasoning capability from frontier reasoning modelHuang et al. (2024d)
Training from TrajectoriesImitation LearningLearn the behavior directly from the collected trajectoriesYu et al. (2024c)
Preference LearningOptimize preference between pos. and neg. trajectoriesJiao et al. (2024a)
Latent ReasoningCompress trajectory length using implicit reasoning tokensHao et al. (2024b)
", + "image_path": "f29e00273e605da0f7119a68c2e6571b38f75b90addfb3732aebec1cebca67eb.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 175, + 443, + 186 + ], + "lines": [ + { + "bbox": [ + 165, + 175, + 443, + 186 + ], + "spans": [ + { + "bbox": [ + 165, + 175, + 443, + 186 + ], + "type": "text", + "content": "Table 6: Summary of learning to reason with standalone LLM." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 211, + 541, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 211, + 541, + 271 + ], + "spans": [ + { + "bbox": [ + 67, + 211, + 541, + 271 + ], + "type": "text", + "content": "directly based on the sampled reasoning chains and their corresponding rewards. While much of the research focus has been on sampling high-quality outputs (i.e., trajectories), methods for generating a robust and diverse set of problems, or model inputs, have also garnered attention. We begin by detailing the process of collecting trajectories, which includes constructing inputs (Section 5.1.1) and obtaining outputs (Section 5.1.2). Subsequently, we describe how the LLM can be trained using the collected trajectories (Section 5.1.3)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 285, + 332, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 332, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 332, + 300 + ], + "type": "text", + "content": "5.1.1 Constructing High-quality Prompts for Reasoning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 307, + 541, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 307, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 67, + 307, + 541, + 380 + ], + "type": "text", + "content": "To effectively drive knowledge distillation and model-seeking, we must curate a diverse collection of high-quality prompts that comprehensively span the target knowledge space. Relying on a narrow or homogeneous prompt set—even when sourced from a strong base model—limits exploration and undermines both distillation and reinforcement learning processes. By contrast, carefully crafted prompts expand the model's exploratory capacity, yielding richer representations and more robust downstream performance. As such, this section covers methods for collecting or synthesizing more challenging prompts." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 394, + 541, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 394, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 394, + 541, + 515 + ], + "type": "text", + "content": "Question augmentation A straightforward approach to generating additional inputs is to directly augment existing datasets using frontier LLMs. For example, Xu et al. (2024a) propose using LLMs to \"evolve\" existing prompt sets, expanding their depth (e.g., more complex instructions) and breadth (e.g., rarer concepts). Yu et al. (2024c) have proposed two main approaches to augment existing questions. 
One is simply rewriting using frontier LLMs, and the other one is self-verification, which transforms an condition in the question into unknown variable, shows the original answer, and proposes a new question by querying the value of the unknown variable. Luo et al. (2023b) adopt a comparable strategy, employing a question generator to iteratively produce both harder and easier versions of a given question, as inspired by the instruction evolution approach of Xu et al. (2024a). The synthesized instructions are further refined using a reward model to ensure quality." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 529, + 541, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 529, + 541, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 529, + 541, + 638 + ], + "type": "text", + "content": "Knowledge graph-based synthesis Directly augmenting prompts with LLMs can increase the size of the training set but does not inherently enhance diversity. To address this, knowledge graphs—structured taxonomies for organizing reasoning domains—have been utilized to construct input prompts with broader coverage. For instance, Li et al. (2024a) employ a frontier LLM to generate a knowledge graph directly, while Tang et al. (2024) task a frontier LLM with extracting a taxonomy from a seed dataset. These knowledge graphs are then used to progressively synthesize challenging questions, which are subsequently used to prompt larger teacher LLMs, resulting in high-quality instruction-tuning datasets with wider knowledge coverage. Additionally, Jiao et al. (2024b) leverage relation graphs derived from web documents to synthesize pretraining data, improving relation-based logical reasoning capabilities." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 651, + 317, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 317, + 664 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 317, + 664 + ], + "type": "text", + "content": "5.1.2 Collecting High-quality Reasoning Trajectories" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "content": "Beyond constructing high-quality prompts, researchers also refine outputs to collect better trajectories for training. These techniques often sample outputs that follow specific reasoning patterns, such as lengthy reasoning processes with self-reflection, and retain those that meet higher quality standards based on ground-truth labels. Consistent with our architecture definitions in Sec. 2.3, we treat the learned verifier as part of the environment in the agentic system. Consequently, this section focuses exclusively on methods that" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 82, + 541, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 82, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 68, + 82, + 541, + 106 + ], + "type": "text", + "content": "utilize existing ground-truth labels—such as answer labels in maths or test cases for code generation—while deferring discussion of methodologies that rely on learned verifiers (reward models or LLM-judges) to Sec. 
5.2." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 118, + 541, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 118, + 541, + 239 + ], + "spans": [ + { + "bbox": [ + 68, + 118, + 541, + 239 + ], + "type": "text", + "content": "Rejection sampling Rejection sampling (Dong et al., 2023) aims to select higher-quality samples by repeatedly sampling from the policy model (reasoner). Quality is determined through two primary sources: (1) a learned verifier, which we discuss in Section 5.2, and (2) direct comparison with ground-truth labels (when available), where samples inconsistent with the ground-truth labels are discarded. Yuan et al. (2023) apply this idea to mathematical reasoning, introducing edit distance to ensure diversity among trajectories. Zelikman et al. (2022) propose STaR to incorporate the correct answer into the instruction, prompting LLMs to iteratively refine incorrect reasoning traces and generate higher-quality trajectories. Tong et al. (2024) employ an up-sampling strategy to increase the proportion of successful trajectories for more challenging questions. This approach has become a standard technique for iterative model self-improvement, as demonstrated in works such as (Jiao et al., 2025; Guan et al., 2025; Dou et al., 2024b)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 251, + 541, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 541, + 346 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 541, + 346 + ], + "type": "text", + "content": "Encourage special reasoning pattern Another line of research focuses on leveraging human-like reasoning behaviors—such as self-reflection, deep reasoning, and thinking-before-action—to improve reasoning accuracy and reduce hallucinations. 
One notable approach is Reasoning-as-Planning (RAP) (Hao et al., 2023), which divides reasoning into three steps: thinking, taking action, and observing (inferring) changes in the environment. When applied to text-based reasoning problems, LLMs simulate environment states after taking actions, leading to more accurate reasoning. Building on this idea, Yuan et al. (2024a) and Chen et al. (2023a) use frontier LLMs like GPT-3.5 and GPT-4 to synthesize trajectories with this pattern for reasoning problems, facilitating imitation learning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 352, + 541, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 352, + 541, + 400 + ], + "spans": [ + { + "bbox": [ + 68, + 352, + 541, + 400 + ], + "type": "text", + "content": "Besides, inspired by the success of long and deep reasoning revealed by OpenAI's o1 model, which incorporate self-reflection and search, some researchers propose imitating this process through rule-based synthesis. For instance, Qin et al. (2024) flatten MCTS trajectories, including failed branches, and ask general models to generate bridge sentences for natural transition from the failed nodes to the ones along the successful paths." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 412, + 541, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 412, + 541, + 484 + ], + "spans": [ + { + "bbox": [ + 68, + 412, + 541, + 484 + ], + "type": "text", + "content": "Reasoning distillation Several studies distill reasoning patterns from models capable of producing good reasoning chains (e.g., OpenAI o1) to replicate similar behaviors in smaller models. For example, Huang et al. (2024d), NovaSky Team (2025), Bespoke Labs (2025) and Muennighoff et al. (2025) distill reasoning chains from models like OpenAI-o1, Qwen-QWQ-32B, DeepSeek-R1, and Gemini Thinking Experimental, respectively. Min et al. 
(2024) diversify this approach by distilling from multiple reasoning models and aggregating outputs into a unified format." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 496, + 224, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 496, + 224, + 509 + ], + "spans": [ + { + "bbox": [ + 69, + 496, + 224, + 509 + ], + "type": "text", + "content": "5.1.3 Training from Trajectories" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 516, + 540, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 516, + 540, + 540 + ], + "spans": [ + { + "bbox": [ + 68, + 516, + 540, + 540 + ], + "type": "text", + "content": "Using the collected trajectories, training can be conducted by designing the input and output formats for the algorithms discussed in Section 4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 552, + 541, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 552, + 541, + 685 + ], + "spans": [ + { + "bbox": [ + 68, + 552, + 541, + 685 + ], + "type": "text", + "content": "Supervised Fine-Tuning (SFT) As discussed in Sec. 4.1.1, the most straightforward approach to training reasoning-capable LLMs is to fine-tune a model using SFT on collected trajectories. Methods such as (NovaSky Team, 2025; Bespoke Labs, 2025; Huang et al., 2024d) and (Min et al., 2024) utilize SFT with a modest number of data samples (4K-20K) to replicate the reasoning capabilities of OpenAI's o1 model. Recent SFT approaches have shifted focus to data scaling, with Xu et al. (2025e) exploring the impact of increasing data quantity up to 1 million CoT samples. Their findings demonstrate that performance improves with data scale, albeit with diminishing returns. In contrast, Muenighoff et al. (2025) adopt a sample-efficient approach, curating a high-quality 1K-sample reasoning dataset for fine-tuning. 
They show that this smaller dataset, combined with strategic inference-time prompting, achieves performance comparable to models trained on larger datasets. Similar strategies have been applied in domain-specific reasoning models, such as earlier math reasoning systems Yu et al. (2023a); Yue et al. (2023)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 696, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 696, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 68, + 696, + 541, + 733 + ], + "type": "text", + "content": "Preference learning and reinforcement learning While SFT approaches have shown effectiveness, other studies demonstrate that preference learning further enhances performance. Min et al. (2024) study DPO, while Xu et al. (2025e) explore various post-training preference learning methods. Hui et al. (2024)," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 191 + ], + "type": "text", + "content": "Min et al. (2024), and Jiao et al. (2024a) all employ DPO with preference pairs derived from code test cases, outcome correctness, and a PRM trained on automatic supervision, respectively. Another line of work focuses on step-level DPO to optimize reasoning action selection. Specifically, Zhang et al. (2024h) use Tree-of-Thought (Yao et al., 2023a) to estimate outcome rewards and backpropagate them to intermediate nodes for quality assessment. 
Step-level DPO is then applied to pairs sharing the same trajectory prefix but with contrasting next actions. Lai et al. (2024) directly use GPT-4o to identify the earliest incorrect reasoning step and construct contrastive step-level DPO pairs for preference learning. Yuan et al. (2024d) adopt an iterative DPO approach in a self-rewarding setting, where the policy model itself acts as an LLM-as-judge to progressively improve its capabilities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 196, + 541, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 196, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 67, + 196, + 541, + 280 + ], + "type": "text", + "content": "In addition to preference learning, RL with verifiable answer labels also demonstrate importance in improving reasoning, where rule-based rewards by checking the correctness of sampled solutions are employed rather than reward models.6 Lambert et al. (2024) use both math reasoning and instruction following data for outcome-based reinforcement learning without reward models. Deepseek-R1 (DeepSeek-AI et al., 2025) further reveal the potential of pure reinforcement learning with verifiable answers. Yu et al. (2025) provide valuable reproduction of Deepseek-R1 on Qwen2.5-32B, including open-sourced data, code, and technical details about loss function design, reward shaping, and dynamic sampling." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 291, + 541, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 291, + 541, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 291, + 541, + 376 + ], + "type": "text", + "content": "Training with latent reasoning Typical reasoning models generate long reasoning chains and have demonstrated strong empirical performance. However, this comes at the cost of increased inference time, as they produce lengthy natural language reasoning traces. 
These traces often contain many tokens that improve the flow and coherence of the output, with only a small fraction directly contributing to the reasoning process. To address this inefficiency, an alternative approach, known as latent reasoning, focuses on representing reasoning trajectories implicitly. This is achieved either by omitting intermediate reasoning tokens entirely or by compressing them into specialized reasoning tokens or continuous vector representations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 380, + 541, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 541, + 453 + ], + "type": "text", + "content": "Earlier work in continuous reasoning focused on compressing natural language reasoning chains into a smaller number of tokens. Deng et al. (2023b) employ knowledge distillation to encode the knowledge from natural language reasoning tokens into intermediate representations of the student model. During inference, the model generates only the final answer without producing additional rationale. This approach is further refined through curriculum learning (Deng et al., 2024b), which gradually removes reasoning tokens during training to reduce distribution mismatch." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 459, + 539, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 459, + 539, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 459, + 539, + 639 + ], + "type": "text", + "content": "However, removing all explicit intermediate reasoning tokens may compromise the model's expressivity (i.e., ability to articulate complex reasoning) (Prystawski et al., 2023). A natural trade-off is to retain a limited number of reasoning tokens, making them implicit to enhance expressiveness while preserving performance. Goyal et al. 
(2024) introduce learnable tokens during pre-training and fine-tuning within standard CoT trajectories, enabling the model to perform additional computation before generating an output token. Wang et al. (2023d) explore various techniques for compressing reasoning steps from training trajectories into a fixed set of planning tokens. At the start of each reasoning step, the model generates a planning token, whose encoded \"knowledge\" guides the generation of more coherent outputs. Hao et al. (2024b) propose using the last-layer hidden states before the language modeling head as implicit reasoning token representations, feeding these back into the model to generate the next token auto-regressively. These implicit representations are optimized in a stage-wise manner, akin to the approach of Deng et al. (2024b). Xu et al. (2025f) propose an approach for continuous-space reasoning that does not require modifying the LLM reasoner. Specifically, they employ a lightweight fixed assistant model to generate instance-specific soft thought tokens speculatively as the initial chain of thoughts, which are then mapped into the LLM's representation space via a trainable projection module." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 651, + 309, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 309, + 664 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 309, + 664 + ], + "type": "text", + "content": "5.2 Learning to Reason with Single-agent Systems" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 673, + 541, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 541, + 697 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 541, + 697 + ], + "type": "text", + "content": "As discussed in Section 2.3, agentic systems enhance the reasoning capabilities of standalone LLMs by incorporating agent-environment interactions. 
These interactions enable the agent to perceive its environment" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 702, + 541, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 702, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 702, + 541, + 732 + ], + "type": "text", + "content": "6We treat the work using reward model/tool-based verifier for RL in the scope of single-agent systems (see Sec. 5.2) 7As discussed in Section 4.2, in outcome-based RL, the reward is assigned to the entire trajectory. This contrasts with process-based RL, which assigns a reward at each step." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 153 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 153 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 153 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 153 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
Action-Environment InteractionsIncorporating FeedbackUse environment feedback to filter trajectoriesNi et al. (2024); Xin et al. (2024b)
Training External ModelsTrain models (e.g., to critic) from the interactionWu et al. (2024c)
Search with VerifiersUse verifiers to identify better reasoning trajectoriesWan et al. (2024c)
Distillation from TeacherDistill capability from frontier reasoning modelGou et al. (2024); Ma et al. (2024a)
Training from TrajectoriesSupervised Fine-TuningCollected offline trajectories + learn via SFTDou et al. (2024b); Yin et al. (2024)
Reinforcement LearningLearning directly on questions and their rewardsShao et al. (2024)
Learning with RefinerTrain refiner model to iteratively improve the last-round solution.Xiong et al. (2025)
", + "image_path": "6468e2dd5f73b620b8760b6d78b4044d48515269db4a51d44ab8543841c582c9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 157, + 161, + 451, + 173 + ], + "lines": [ + { + "bbox": [ + 157, + 161, + 451, + 173 + ], + "spans": [ + { + "bbox": [ + 157, + 161, + 451, + 173 + ], + "type": "text", + "content": "Table 7: Summary of learning to reason with single-agent systems." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 196, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 196, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 196, + 541, + 232 + ], + "type": "text", + "content": "and accordingly perform actions. This section explores how simulation is achieved through the design of such perceptions and agent actions. It then covers training methods—how agents are trained using these trajectories. Additionally, we discuss how predefined patterns are leveraged when collecting trajectories." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 245, + 389, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 245, + 389, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 245, + 389, + 258 + ], + "type": "text", + "content": "5.2.1 Trajectory Collection through Agent-Environment Interactions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 266, + 541, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 266, + 541, + 315 + ], + "spans": [ + { + "bbox": [ + 67, + 266, + 541, + 315 + ], + "type": "text", + "content": "By interacting with the external world in different ways, agents can effectively construct trajectories that help refine their reasoning process. 
These interactions to enrich reasoning take the form of (a) incorporating execution feedback, (b) training external models to help reasoning, (c) search with verifiers, and (d) trajectory distillation from stronger teacher agents." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 327, + 541, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 327, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 541, + 460 + ], + "type": "text", + "content": "Incorporating execution feedback Through active interaction with the environment, the agent can obtain valuable feedback for trajectory filtering. Building on STaR (Zelikman et al., 2022) (discussed in Sec. 5.1.2), NExT (Ni et al., 2024) leverages unit tests (Ye et al., 2022) to obtain self-generated rationales that lead to correct solutions for training. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) solve formal theorem-proving problems by generating potential solutions and validating them through interaction with the Lean proof assistant (De Moura et al., 2015), either proving or disproving the solutions. Xin et al. (2024b) further improve DeepSeek-Prover by introducing RMaxTS, an exploration strategy driven by intrinsic rewards to generate diverse proof paths. Furthermore, the agent can integrate environmental information directly into the training process to improve its reasoning capabilities. For example, Cummins et al. (2023) train a 7B model from scratch, achieving significantly improved code optimization performance by leveraging optimizing transformations from external LLVM compilers." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 473, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 473, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 473, + 541, + 534 + ], + "type": "text", + "content": "Training external models The agent can leverage its interaction with the environment to train external models that can in turn help the agent's reasoning. For example, Wu et al. (2024c) train a critic model to identify relatively easier problems for the policy to explore and guide the policy in searching for deeper proof paths. Re-ReST (Dou et al., 2024b) proposes training a refiner to correct the agent's wrong output based on environmental feedback." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 547, + 541, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 547, + 541, + 619 + ], + "spans": [ + { + "bbox": [ + 67, + 547, + 541, + 619 + ], + "type": "text", + "content": "Reasoning search with verifiers Search-based methods address sampling challenges for more difficult problems by leveraging external reward models or generation probabilities to guide decoding. For example, Wan et al. (2024c) develop a Monte Carlo Tree Search (MCTS)-based approach to identify better reasoning trajectories. Each tree node represents either a sentence or token, and a learned LLM-based value function and outcome reward model are used to estimate expected returns during the search process. This method can be applied for both inference-time path selection and training-time imitation learning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "type": "text", + "content": "Guan et al. 
(2025) rely solely on outcome labels to iteratively update the policy model and a process preference model (PPM) through MCTS. The PPM approximates the Q-value of intermediate reasoning steps. Lai et al. (2024) use an LLM-as-judge to identify the first reasoning step in a sampled trajectory that contains an error. The trajectory up to the error is then used to sample new outputs, and DPO preference pairs are formed from correct and incorrect outputs. Zhang et al. (2024h) focus on unsupervised settings where answer labels are unavailable. Discarded steps collected during the search process are treated as negative actions, contrasting with the steps retained in the final path for DPO training. For multi-step reasoning in dynamic environments, such as web navigation, Putta et al. (2024) propose combining guided MCTS with self-critique to facilitate more effective exploration." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 262 + ], + "type": "text", + "content": "Trajectory distillation from stronger teacher agents To tackle challenging mathematical problems, Gou et al. (2024) curate interactive tool-use (e.g., code execution) trajectories using GPT-4, derived from existing mathematical datasets across various domains. Similarly, MuMath-Code (Yin et al., 2024) employs multi-perspective data augmentation to generate diverse math questions and synthesizes code-nested solutions using GPT-4. 
Beyond mathematics, other domains have also been explored. For instance, Ma et al. (2024a) construct a tool-augmented training set for scientific reasoning by prompting GPT-4. CoGEX (Weir et al., 2024) extends LLMs' program synthesis capabilities to tasks that are not easily expressible as code, such as commonsense reasoning and sarcasm understanding. To collect training trajectories, GPT-4 is used to transform the Alpaca dataset (Taori et al., 2023) into the required format. Ke et al. (2025b) explore collecting trajectories from a more capable generative reward model (GPT-4o) to train a finance-expert model by identifying and correcting the first erroneous step in the reasoning process. Additionally, AgentBank (Song et al., 2024) introduces the largest dataset of agent-environment interaction trajectories, comprising 16 tasks across 5 distinct agent skill dimensions. This dataset is created by annotating actions and their corresponding rationales using LLMs of varying scales, addressing key challenges in trajectory collection, such as scalability." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 267, + 541, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 267, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 267, + 541, + 304 + ], + "type": "text", + "content": "In addition to leveraging trajectories from GPT-4, Gou et al. (2024) introduce output space shaping by incorporating samples generated by the agent itself. Specifically, they train the agent on both self-sampled correct trajectories and those corrected by a teacher model, promoting diversity in plausible reasoning steps." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 316, + 256, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 256, + 328 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 256, + 328 + ], + "type": "text", + "content": "5.2.2 Agent Training from Trajectories" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 337, + 541, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 337, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 337, + 541, + 515 + ], + "type": "text", + "content": "Supervised Fine-Tuning (SFT) After collecting trajectories, many methods apply supervised fine-tuning (SFT) to train the agent, enabling models with little prior experience in agentic environments to adapt quickly. Dou et al. (2024b) enhances agent reasoning by incorporating refiner-corrected samples into the self-training process. NExT (Ni et al., 2024) uses filtered trajectories to train agents for program repair tasks, while Weir et al. (2024) fine-tune agents on collected trajectories to enable the generation and emulation of pseudo-programs. AlphaProof (AlphaProof & teams, 2024) and DeepSeek-Prover (Xin et al., 2024a) iteratively train and refine the policy model using verified proofs, improving performance in theorem proving tasks. Similarly, Gou et al. (2024), Yin et al. (2024), Ma et al. (2024a), and Song et al. (2024) fine-tune agents on agent-environment interaction trajectories generated by proprietary LLMs, enhancing reasoning capabilities across diverse domains. Notably, MuMath-Code (Yin et al., 2024) adopts a two-stage training strategy, first fine-tuning on pure CoT data and then on code-nested data. Chen et al. (2024e) introduce Agent-FLAN, a fine-tuning method designed to improve LLMs' agent capabilities while addressing challenges such as distribution shifts and hallucinations in training data. 
By redesigning the training corpus and incorporating negative samples, Agent-FLAN enhances both agent-specific and general capabilities of LLMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 529, + 539, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 529, + 539, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 529, + 539, + 732 + ], + "type": "text", + "content": "Reinforcement Learning (RL) Beyond imitation learning through SFT, recent approaches have leveraged reinforcement learning to further enhance reasoning capabilities. Notably, GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025), which employs verifiable outcome rewards during online RL training, has demonstrated strong empirical performance. Havrilla et al. (2024) investigate multiple RL algorithms (e.g., Expert Iteration, PPO) for math reasoning tasks, finding that incorporating outcome reward models has negligible effects on performance for both Expert Iteration and PPO. Similarly, Shao et al. (2024) observe relatively minor performance gains when using PRMs during GRPO training. Yang et al. (2024b) explore using a PRM to \"shape\" outcome rewards by using a linear combination of outcome and PRM rewards for GRPO training. In contrast, Wang et al. (2024g); Luo et al. (2023a); Jiao et al. (2024a) demonstrate that using a trained PRM during PPO training leads to significant performance improvements. Similar gains are observed in the code generation domain (Dai et al., 2024), where the PRM serves both as a reward signal and as an initial checkpoint for the value function during PPO. Zhang et al. (2024a) iteratively train both a PRM and LLM, while Setlur et al. (2024b) provide a new perspective by comparing Q-value-based PRMs with advantage function-based ones, showing improved learning efficiency and performance in guided reinforcement learning. Concurrently, Gao et al. 
(2024a) address reward hacking (Casper et al., 2023)—where the policy model generates numerous correct but irrelevant reasoning steps to inflate rewards—by implementing clipping and computing relative, step-adjacent rewards." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 80, + 538, + 155 + ], + "blocks": [ + { + "bbox": [ + 72, + 80, + 538, + 155 + ], + "lines": [ + { + "bbox": [ + 72, + 80, + 538, + 155 + ], + "spans": [ + { + "bbox": [ + 72, + 80, + 538, + 155 + ], + "type": "table", + "html": "
PerspectiveMethodCharacteristicRepresentative Work
Designing CommunicationCentralized communicationUse a centralized controller for information aggregationCanese et al. (2021); Matta et al. (2019)
Conditioned information sharingShare information based on relevancy and privacyHong et al. (2023); Qiu et al. (2024)
Coordinating ActionsLeverage knowledgeUtilize expert knowledge as constraintsLau et al. (2012)
Graph-based methodsUse graphs as structured frameworksRuan et al. (2022); Li et al. (2020)
Hierarchical approachDivide policies to strategy and executionXu et al. (2023)
Training from TrajectoriesTraining data from interactionsObtain high-quality trajectories from interactionsLi et al. (2024c); Estornell et al. (2024)
Gradient modificationModify gradients towards optimal pointsLi et al. (2024f)
", + "image_path": "9a72af506464d7266485c42711ec30e3529535cc0cb4bc99428f137ea9d892be.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 163, + 447, + 175 + ], + "lines": [ + { + "bbox": [ + 162, + 163, + 447, + 175 + ], + "spans": [ + { + "bbox": [ + 162, + 163, + 447, + 175 + ], + "type": "text", + "content": "Table 8: Summary of learning to reason for multi-agent systems." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 198, + 541, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 198, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 198, + 541, + 258 + ], + "type": "text", + "content": "Qiao et al. (2023a) introduce TRICE, a two-stage framework that enables agents to determine when and how to use tools through Reinforcement Learning with Execution Feedback (RLEF) from external tools. Similarly, Xin et al. (2024b) enhance DeepSeek-Prover by incorporating reinforcement learning from proof assistant feedback (RLPAF). To effectively learn from both successful and unsuccessful agent-environment interactions, Putta et al. (2024) develop an off-policy variant of DPO for iterative training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 271, + 541, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 271, + 541, + 332 + ], + "spans": [ + { + "bbox": [ + 67, + 271, + 541, + 332 + ], + "type": "text", + "content": "Learning with refiner For more challenging questions, models may fail to generate enough successful trajectories to serve as a reliable positive training signal. However, even trajectories with incorrect outcomes can still be leveraged effectively. For example, Qu et al. (2024a) train a correction model using RL to iteratively refine generated model responses. Similarly, Tang et al. 
(2025) propose a self-evolving framework to train a critique model, which enhances the quality of outputs through continuous feedback." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 338, + 541, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 338, + 541, + 517 + ], + "spans": [ + { + "bbox": [ + 67, + 338, + 541, + 517 + ], + "type": "text", + "content": "Refiner models can also be integrated into the search process to iteratively improve generation quality. For instance, Snell et al. (2024) train a refiner model via RL (Qu et al., 2024b) to refine outputs sequentially. The final prediction is obtained through majority voting over all predictions generated during this iterative refinement process, effectively scaling test-time computation. Xi et al. (2024) develop a step-level critique model that provides feedback for each reasoning step, using training instances collected from GPT-4o. This feedback serves two purposes: (1) expanding training data to improve the actor model, and (2) scaling test-time computation through iterative self-refinement in a multi-agent setup. Zhang et al. (2024b) combine reasoning and self-refinement into a single MCTS framework, where each node is either a reasoning node (generating complete reasoning trajectories) or a refining node (identifying and correcting reasoning flaws). A learned pairwise reward model compares the quality of refined and original outputs, estimating the expected returns of each node. However, this work does not explicitly account for the inference setting, where neither the reasoner nor the refiner has access to the correctness of the sampled response. This can lead to refiners inadvertently degrading originally correct solutions. To address this issue, Xiong et al. (2025) introduce a learnable self-rewarding mechanism. 
This approach mitigates the risk of worsening correct solutions and alleviates the distribution-shifting problem in self-correction (Kumar et al., 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 532, + 303, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 532, + 303, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 532, + 303, + 544 + ], + "type": "text", + "content": "5.3 Learning to Reason with Multi-agent System" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 554, + 541, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 554, + 541, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 554, + 541, + 639 + ], + "type": "text", + "content": "In Section 2.3, we discussed how multi-agent systems extend single-agent systems through agent-agent communication. This enables agents to assume distinct roles, exchange messages, and coordinate their actions before interacting with the environment. In this section, we explore how trajectory collection can be achieved through the careful design of agent-agent communication and the coordination of actions across different agents. As a system level, communication serves as the input or perception mechanism for participating agents, focusing on the protocols governing message exchange. Meanwhile, actions represent the output of the system, addressing how consensus is reached given the diverse actions proposed by individual agents." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 651, + 288, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 288, + 664 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 288, + 664 + ], + "type": "text", + "content": "5.3.1 Designing Agent-Agent Communication" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 541, + 733 + ], + "type": "text", + "content": "In a multi-agent framework, ensuring that each agent is aware of the actions of others is critical, as a well-designed communication system can significantly enhance collective intelligence (Guo et al., 2024b). One effective solution is the use of a centralized controller (Canese et al., 2021). For example, Matta et al. (2019) propose a centralized aggregation center that constructs a global swarm matrix by aggregating the Q-value tables of all agents. Similarly, the MARCO framework (Zhang et al., 2021) employs centralized training with" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": "decentralized execution to improve sample efficiency in partially observable multi-agent environments. 
By learning a shared model that generalizes across agents' policies and directing exploration toward uncertain areas, MARCO optimizes reasoning and resource utilization in cooperative tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 124, + 541, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 541, + 208 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 541, + 208 + ], + "type": "text", + "content": "To enable effective communication among agents, Sukhbaatar et al. (2016) introduce a neural communication model with a learned protocol tailored to the task. Additionally, a shared message pool (Hong et al., 2023) can be implemented, where agents send messages and subscribe to relevant ones based on their individual profiles. In recent work by Qiu et al. (2024), each agent maintains a private intention, which includes its current goal and associated sub-tasks. These intentions are broadcast periodically, and a propagation network converts them into teammate-specific communication messages, ensuring that relevant goals are shared with the appropriate teammates." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 221, + 313, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 221, + 313, + 234 + ], + "spans": [ + { + "bbox": [ + 67, + 221, + 313, + 234 + ], + "type": "text", + "content": "5.3.2 Coordinating Actions among Multiple Agents" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 243, + 541, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 541, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 541, + 361 + ], + "type": "text", + "content": "To enhance coordination among multiple agents, various approaches have been proposed, including leveraging expert knowledge, graph-based frameworks, and hierarchical structures to improve efficiency and effectiveness. For better coordination of actions across agents, Lau et al. 
(2012) utilize expert coordination knowledge as constraints to refine the exploration and learning process. By reducing the action space and focusing on promising states, this approach enhances decision-making. Additionally, graph-based methods have been explored to improve coordination. For instance, the Graph-based Coordination Strategy (GCS) (Ruan et al., 2022) introduces a framework that employs a directed acyclic graph to coordinate agent policies. This enables agents to synchronize their actions through predefined temporal sequences. Similarly, Deep Implicit Coordination Graphs (DICG) (Li et al., 2020) propose a graph neural network-based module to dynamically infer coordination structures for multi-agent reinforcement learning (MARL)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 368, + 541, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 368, + 541, + 404 + ], + "spans": [ + { + "bbox": [ + 67, + 368, + 541, + 404 + ], + "type": "text", + "content": "Furthermore, hierarchical approaches have been developed to enhance synchronization. The Hierarchical Cooperative Multi-Agent Learning (HAVEN) framework (Xu et al., 2023) divides policies into two levels—strategy and execution—improving both inter-agent and inter-level coordination." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 417, + 285, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 417, + 285, + 430 + ], + "spans": [ + { + "bbox": [ + 67, + 417, + 285, + 430 + ], + "type": "text", + "content": "5.3.3 Multi-Agent Training from Trajectories" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 438, + 541, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 438, + 541, + 617 + ], + "spans": [ + { + "bbox": [ + 67, + 438, + 541, + 617 + ], + "type": "text", + "content": "Compared to single-agent scenarios, multi-agent training introduces additional challenges in higher coordination and communication complexity and recent approaches have leveraged different ways to address the challenge. DEBATUNE (Li et al., 2024c) employs a multi-round debate mechanism between two agents with opposing stances to generate training data. Through iterative debate, arguments are refined, resulting in high-quality and diverse outputs. During the training phase, models are fine-tuned using these debate-generated trajectories, enabling controllability and alignment with user-defined stances. Similarly, Subramaniam et al. (2025) fine-tune a society of agents, starting from the same base model, on independent data generated through multi-agent interactions. These agents specialize in distinct roles, such as \"generation\" and \"critic\" producing diverse reasoning trajectories. Training on such varied trajectories fosters specialization and mitigates performance plateaus. Acc-Debate (Estornell et al., 2024) utilizes an Actor-Critic framework to train a team of two agents collaboratively. One agent serves as the \"Actor\" generating responses, while the other acts as the \"Critic\" refining those responses. 
Training alternates between optimizing the Actor and Critic models, leveraging partial trajectory rewards which captures the expectation of reaching the correct answer at intermediate time stepsto address temporal dependencies in the debate process. This approach enhances collaboration and improves final performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 624, + 541, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 541, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 541, + 660 + ], + "type": "text", + "content": "Furthermore, Li et al. (2024f) address the challenge of mixed-motive cooperation in multi-agent systems by modifying gradients to guide agents toward stable fixed points that balance individual and collective interests. This method enhances the ability to optimize trajectories for effective collaboration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 674, + 320, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 674, + 320, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 674, + 320, + 687 + ], + "type": "text", + "content": "5.4 Toward Cost-aware and Inference-aware Training" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 696, + 541, + 733 + ], + "type": "text", + "content": "As reasoning models grow increasingly complex, ensuring both efficiency and effectiveness becomes crucial. Inference-time scaling and learning-to-reason approaches play complementary roles, as most inference-time scaling methods can be applied to models specifically trained for reasoning. 
However, both approaches come" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 167 + ], + "type": "text", + "content": "with associated costs, whether it involves generating thousands of additional tokens compared to greedy decoding during inference or training models on large-scale trajectory datasets. Consequently, cost-aware methodologies, which factor in computational costs when deciding how to allocate resources during both training and inference, or those that address sample inefficiency, have gained recent attention. Similarly, inference-aware methodologies aim to enhance the time and cost efficiency of inference scaling by explicitly incorporating inference-time scaling strategies during training. In this section, we explore emerging cost-aware and inference-aware approaches." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 180, + 198, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 180, + 198, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 180, + 198, + 192 + ], + "type": "text", + "content": "5.4.1 Cost-aware Training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 201, + 541, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 201, + 541, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 201, + 541, + 333 + ], + "type": "text", + "content": "Learning to reduce inference cost This line of research explores strategies to optimize the tradeoff between computational cost and reasoning performance by dynamically allocating resources based on input (prompt) complexity and desired output quality. For prompt analysis, Damani et al. (2025) use a learnable model to predict the difficulty of batched queries and dynamically allocate inference budgets accordingly. Building on this, Zhang et al. (2024d) train a model to predict the most efficient combination of inference strategies, directly optimizing for pass rates. Yue et al. (2025) decompose reasoning trajectories into specific behaviors and employ a trainable planner to derive question-specific compositions, identifying the optimal reasoning strategy—such as whether question decomposition or rewriting is necessary, whether Python programs are required, or if answer verification is needed. On the output side, Snell et al. (2025) propose a look-ahead search method, similar to step-level beam search, which switches between branches based on estimated returns to minimize search costs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 347, + 541, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 347, + 541, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 347, + 541, + 445 + ], + "type": "text", + "content": "Data-efficient training Another research direction focuses on reducing training costs by using a small set of high-quality samples (questions paired with trajectories or labels). Muennighoff et al. (2025) curate a dataset of 1,000 samples, emphasizing difficulty, diversity, and quality. Their work demonstrates that finetuning Qwen2.5-32B-Instruct on this dataset achieves performance surpassing o1-preview on competition math benchmarks. Ye et al. (2025) fine-tune Qwen2.5-32B-Instruct on 817 carefully curated training samples, achieving superior performance across a broader set of math reasoning benchmarks. Notably, Ye et al. (2025) highlight that these performance gains depend on using strong pre-trained models like Qwen2.5-32B-Instruct and do not occur with weaker models (e.g., Qwen1.5-32B-Instruct)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 457, + 218, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 457, + 218, + 470 + ], + "spans": [ + { + "bbox": [ + 69, + 457, + 218, + 470 + ], + "type": "text", + "content": "5.4.2 Inference-aware Training" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 479, + 541, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 479, + 541, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 479, + 541, + 539 + ], + "type": "text", + "content": "Existing work on inference scaling typically treats inference-time computation as a post-hoc design choice after conventional training. Inference-aware training approach challenges the assumption that decoupling training and inference-time computation is optimal. 
For instance, if an LLM is allowed multiple attempts to solve a math problem, fine-tuning it to explore diverse problem-solving strategies might yield better results than simply generating candidates representing its best single attempt." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 544, + 539, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 539, + 640 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 539, + 640 + ], + "type": "text", + "content": "The core idea is that explicitly considering the inference procedure during training can significantly enhance the effectiveness of inference-time computation. For example, Best-of-N (BoN) is a basic inference-time strategy that selects the highest-reward response from " + }, + { + "bbox": [ + 67, + 544, + 539, + 640 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 544, + 539, + 640 + ], + "type": "text", + "content": " candidates. However, this approach is misaligned with fine-tuning objectives. To address this, Sessa et al. (2024) propose an RL objective that distills the Best-of-N distribution into the policy model using Jeffreys divergence (Jeffreys, 1946). Similarly, Balashankar et al. (2024) develop a calibrated reward that incorporates the inference procedure (Best-of-N) during alignment. In a related effort, Chow et al. (2024) aim to optimize BoN directly, overcoming the non-differentiable argmax operator by employing a reinforcement learning framework." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 657, + 316, + 671 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 657, + 316, + 671 + ], + "spans": [ + { + "bbox": [ + 67, + 657, + 316, + 671 + ], + "type": "text", + "content": "6 Discussion: Trends and Open Challenges" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 685, + 541, + 734 + ], + "type": "text", + "content": "The field of reasoning LLMs has seen rapid advancements, with notable trends emerging in training-vs-inference regimes and architectural dimensions as we discuss in Section 6.1. Despite this progress, several challenges remain, hindering their generalizability and practical applicability. This section outlines these observed trends and highlights open challenges, along with potential directions to address them (Section 6.2)." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 71, + 81, + 175, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 81, + 175, + 94 + ], + "spans": [ + { + "bbox": [ + 71, + 81, + 175, + 94 + ], + "type": "text", + "content": "6.1 Observed Trends" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 104, + 541, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 104, + 541, + 200 + ], + "spans": [ + { + "bbox": [ + 70, + 104, + 541, + 200 + ], + "type": "text", + "content": "Following the two dimensions outlined in Figure 2, we identify two key trends in LLM reasoning: one progresses from inference scaling to learning to reason (Section 6.1.1), while the other shifts from standalone LLMs to agentic systems (Section 6.1.2). Additionally, reasoning is ubiquitous yet challenging when developing a general-purpose reasoner. Notably, many state-of-the-art reasoning language models are predominantly focused on a few domains, particularly mathematics and coding (OpenAI et al., 2024; DeepSeek-AI et al., 2025). Whether it is possible to build a truly generalizable reasoning system remains an open question (Kang et al., 2024; Qi et al., 2024; Huang et al., 2024c; Sun et al., 2024c). However, we observe a growing trend toward developing domain-specific reasoning models (Section 6.1.3)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 212, + 317, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 212, + 317, + 225 + ], + "spans": [ + { + "bbox": [ + 71, + 212, + 317, + 225 + ], + "type": "text", + "content": "6.1.1 From Inference Scaling to Learning to Reason" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 233, + 541, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 233, + 541, + 318 + ], + "spans": [ + { + "bbox": [ + 70, + 233, + 541, + 318 + ], + "type": "text", + "content": "Since the introduction of CoT and self-consistency (Wang et al., 2023f), inference scaling techniques have emerged as a key paradigm for enhancing reasoning performance without incurring the costs associated with reasoning-specific training. Inference scaling complements learning-to-reason approaches, with recent studies demonstrating that combining self-consistency with reasoning-specific training yields further improvements (DeepSeek-AI et al., 2025; Muennighoff et al., 2025). Additionally, since the release of OpenAI's o1 (Huang et al., 2024d), some methods have sought to activate human-like reasoning patterns by introducing self-correction (Kumar et al., 2024), self-critique (Xi et al., 2024), or even MCTS Qin et al. (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 323, + 541, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 323, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 70, + 323, + 541, + 430 + ], + "type": "text", + "content": "Researchers initially found that data-driven approaches, such as supervised fine-tuning (SFT) and knowledge distillation, were highly effective in enhancing LLMs' reasoning capabilities. However, these methods rely on the availability of a strong teacher model for distillation. 
An alternative approach uses outcome labels for iterative rejection sampling (Yuan et al., 2023), which converges quickly after a few iterations (Dong et al., 2023). These limitations have spurred the development of more data-efficient methods, such as automatic process supervision (Jiao et al., 2024a; Wang et al., 2024g;k; Luo et al., 2024b) and iterative refinement (Guan et al., 2025), which optimize training trajectories using fixed outcome labels. The release of Deepseek-R1 (DeepSeek-AI et al., 2025) further advanced the field, demonstrating the ability to generate human-like, long reasoning chains through pure reinforcement learning under outcome supervision alone." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 443, + 307, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 443, + 307, + 456 + ], + "spans": [ + { + "bbox": [ + 71, + 443, + 307, + 456 + ], + "type": "text", + "content": "6.1.2 From Standalone LLMs to Agentic Systems" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 464, + 541, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 464, + 541, + 513 + ], + "spans": [ + { + "bbox": [ + 70, + 464, + 541, + 513 + ], + "type": "text", + "content": "In Sections 2.3 and 5, we discussed how the rise of agentic systems has significantly influenced reasoning research. A clear trend has emerged, shifting from standalone LLM reasoning to agentic reasoning. This shift aligns with our expectations: reasoning is no longer confined to a single LLM but is expected to interact with the external world and other agents, as well as exhibit autonomy, such as planning capabilities." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 518, + 541, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 518, + 541, + 626 + ], + "spans": [ + { + "bbox": [ + 70, + 518, + 541, + 626 + ], + "type": "text", + "content": "On one hand, there is ongoing debate about whether agentic reasoning is always beneficial, especially for straightforward and simple tasks (Sprague et al., 2024b; Liu et al., 2024c). On the other hand, current systems' autonomy is largely limited to planning, whereas it could encompass much more. For instance, system-level or meta-level planning is essential in agentic systems, requiring the design of effective ways to connect different agents (Zhou et al., 2025a; Zhuge et al., 2024; Zhang et al., 2024c; Hu et al., 2025). A notable recent study (Ke et al., 2025c) demonstrates that such design can be with zero supervision and through self-improvement alone. Another critical aspect of autonomous agents is proactivity, yet current reasoning agents still lack the ability to proactively seek clarification or request additional information from users or the environment." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 639, + 231, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 639, + 231, + 651 + ], + "spans": [ + { + "bbox": [ + 71, + 639, + 231, + 651 + ], + "type": "text", + "content": "6.1.3 Domain-Specific Reasoners" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "type": "text", + "content": "Mathematical reasoning Mathematics serves as an ideal testbed for studying LLM reasoning capabilities due to its structured nature and clear evaluation criteria. Mathematical reasoning has evolved along two complementary paths. 
The first, often referred to as the \"informal approach\" (Yang et al., 2024d), treats mathematical problems as natural language tasks and fine-tunes LLMs on carefully curated or filtered problem-solving datasets. Systems like NuminaMath (Fleureau et al., 2024), DeepSeekMath (Shao et al., 2024), Llemma (Azerbayev et al., 2024), and MetaMath (Yu et al., 2024b) have demonstrated remarkable" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 761 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "content": "capabilities by combining mathematical text training (pre-training, supervised fine-tuning, and reinforcement learning), tree-based search, tool-integrated reasoning, and various inference scaling techniques discussed in earlier sections. This approach has achieved significant success across benchmarks ranging from GSM8K (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021b) to competition-level problems such as AIMO (Markets, 2024) and AIME-level problems (aim, 2025). However, challenges persist in tackling college-level and advanced mathematics, where high-quality training data is scarce, and verifying complex multi-step reasoning becomes increasingly difficult. Spatial reasoning (e.g., counting, navigation, and inferring spatial relationships) presents another challenge for LLMs and multi-modal LLMs (Wang et al., 2024b)." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 183, + 541, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 183, + 541, + 328 + ], + "spans": [ + { + "bbox": [ + 67, + 183, + 541, + 328 + ], + "type": "text", + "content": "Complementing the informal approach, formal mathematical reasoning grounds systems in precise symbolic frameworks, such as proof assistants like Isabelle (Nipkow et al., 2002), Lean (De Moura et al., 2015), and Coq (Barras et al., 1997; The Coq Development Team, 2024). Recent advances in this direction include neural theorem-proving systems that combine tactic generation with proof search (Yang et al., 2023b; Thakur et al., 2024), as well as autoformalization techniques that translate between natural and formal mathematics (Wu et al., 2022; Jiang et al., 2024a). The formal approach offers several advantages: automatic verification of reasoning steps, generation of training signals from the verification environment, and the potential to bootstrap capabilities through learned abstractions. For example, AlphaProof (AlphaProof & teams, 2024) and AlphaGeometry (Trinh et al., 2024) demonstrate the power of integrating neural networks with symbolic verification, achieving groundbreaking performance on Olympic-level mathematics problems. A recent position paper by Yang et al. (2024d) argues that formal mathematical reasoning represents a critical frontier for advancing AI's ability to tackle increasingly abstract and complex mathematical problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 342, + 541, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 541, + 485 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 541, + 485 + ], + "type": "text", + "content": "Code generation Code serves as a more formal language for reasoning. 
Given the complexity of generating entire programs, earlier studies primarily focused on function-level code completion, as demonstrated by benchmarks such as HumanEval (Chen et al., 2021) and MBPP (Austin et al., 2021). With stronger foundation models trained on extensive code corpora (Zhu et al., 2024a; Hui et al., 2024), the focus of evaluation has shifted toward general competition programming (Hendrycks et al., 2021a; Jain et al., 2024). The earliest significant attempt to solve competition-level coding problems through large-scale training was AlphaCode (Li et al., 2022). Similar to the general domain, the training paradigm has evolved from instruction tuning (Wei et al., 2024) to RL and preference learning based on test cases and compiler feedback (Dou et al., 2024a; Weyssow et al., 2024; Jiao et al., 2025; Huang et al., 2024b). The recent releases of DeepSeek-R1 (DeepSeek-AI et al., 2025) and OpenAI's o3 (OpenAI et al., 2025) have further advanced the field by enabling end-to-end RL through outcome supervision. OpenAI et al. (2025) also highlight that purely data-driven approaches can outperform models incorporating human-experience-based competition strategies." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 491, + 541, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 541, + 587 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 541, + 587 + ], + "type": "text", + "content": "Another important application of code generation is in software engineering, where advancements in LLMs are making fully automated pipelines increasingly feasible. SWE-Bench (Jimenez et al., 2024), a benchmark based on GitHub issues, challenges LLMs with real-world software engineering problems. These tasks require coupled abilities, such as long-context modeling to process repository-level inputs, logical reasoning to locate bugs and design unit tests, and programming to implement solutions. Wei et al. 
(2025) pioneer the use of end-to-end RL for optimizing automatic debugging. Specifically, they select pull requests (PRs) from GitHub linked to issues and use the consistency between the predicted code snippet and the repository's code after the PR is merged as the reward signal." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 733 + ], + "type": "text", + "content": "Tabular reasoning Reasoning over tabular (or structured) data, which involves generating responses based on user queries and provided tables, plays a vital role in improving data analysis efficiency (Lu et al., 2025). A critical aspect of tabular reasoning with LLMs involves transforming structured data into a format that these models can process effectively. Techniques such as serialization (Chen, 2023; Cheng et al., 2023; Chen et al., 2023e), prompt engineering (Ye et al., 2023b; Lin et al., 2023b; Wang et al., 2024n; Zhang et al., 2024j), and embedding methods (Herzig et al., 2020) have been widely studied to facilitate this adaptation, converting tabular data into human-readable text or leveraging specialized table representations. Additionally, specialized prompting of LLMs with transformed tabular data is crucial. For instance, Pourreza & Rafiei (2023); Ye et al. (2023c) find that LLMs perform better on decomposed sub-tasks than on the entire table reasoning task. However, LLMs may still struggle with certain sub-tasks. 
To address this, (Cao et al., 2023) employ diverse tools for specific sub-tasks, while (Lin et al., 2023b;a) focus on retrieving relevant" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 118 + ], + "type": "text", + "content": "tables. Notably, (Jiang et al., 2023) propose a unified approach to enhance LLM reasoning over structured data by designing specialized interfaces. These interfaces extract relevant evidence from structured data, enabling LLMs to focus on reasoning based on the gathered information." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 124, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 541, + 232 + ], + "type": "text", + "content": "Despite the promising results of various adaptation methods, significant challenges remain. First, tabular data often comprises diverse feature types—categorical, numerical, and textual—adding complexity to modeling (Borisov et al., 2023; Gruver et al., 2023). Second, the effectiveness (Sui et al., 2024) and robustness (Liu et al., 2024d) of LLMs in tabular tasks heavily depend on proper prompt design and data preprocessing. Poor or out-of-distribution preprocessing can lead to information loss, misinterpretation, multicollinearity, and interpretability issues, significantly degrading performance (Sui et al., 2024). 
Finally, LLMs are prone to hallucinations (Ye et al., 2023d) and fairness concerns (Liu et al., 2023), limiting their reliability. For a comprehensive overview, see recent surveys on LLMs for table reasoning (Fang et al., 2024b; Dong & Wang, 2024; Zhang et al., 2025a; Lu et al., 2025)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 249, + 541, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 249, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 67, + 249, + 541, + 308 + ], + "type": "text", + "content": "Reasoning in multi-agent games In game-theoretic scenarios involving both collaboration and competition, strategic social reasoning skills are essential (Lee et al., 2024). Strategic reasoning refers to the cognitive process of making decisions in complex social situations. As highlighted by Feng et al. (2024b), the complexity and challenges of this reasoning stem from the involvement of multiple parties and the dynamic nature of the environment." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 314, + 541, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 314, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 314, + 541, + 483 + ], + "type": "text", + "content": "To capture the cognitive states of multiple parties, the concept of Theory-of-Mind (ToM) (Zhang et al., 2012) has been integrated into modeling processes. ToM attributes mental states—such as beliefs, intentions, desires, emotions, and knowledge—to oneself and others. Recent studies (Kosinski, 2024) have shown that LLMs exhibit ToM capabilities, and researchers have leveraged these capabilities to enhance strategic reasoning in social scenarios. For instance, Guo et al. (2023) computationally model the beliefs, intents, and potential behaviors of teammates and opponents to improve understanding and reasoning in games. 
Similarly, TOMABD (Montes et al., 2023) incorporates ToM into agents to enhance their reasoning and decision-making abilities. To address the complexity of dynamic social interactions (Li et al., 2024d), prior research employs RL methods to explore potential behaviors and evaluate different states (Seo & Lee, 2017; Wen et al., 2019). Additionally, some studies introduce modular frameworks to improve strategic reasoning in complex scenarios. For example, ReTA (Duan et al., 2024) uses LLM-based modules as the main actor, reward actor, and anticipation actor, inspired by minimax game theory. Recent work (Trecsenyi et al., 2025) has also begun exploring role-based multi-agent interactions to enable more sophisticated strategic reasoning. These approaches collectively enhance LLMs' strategic reasoning capabilities in dynamic environments." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 499, + 541, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 499, + 541, + 619 + ], + "spans": [ + { + "bbox": [ + 67, + 499, + 541, + 619 + ], + "type": "text", + "content": "Reward modeling and evaluation as a reasoning task Evaluation, whether as an end goal or a component of a larger reasoning system, remains a significant challenge. While using PRMs to enhance reasoning abilities is popular during both inference and training, training these models requires extensive step-by-step annotations (Lightman et al., 2024). To address this, recent approaches have introduced automated feedback mechanisms, such as tree search (Wang et al., 2024g; Chen et al., 2024a; Setlur et al., 2024a; Luo et al., 2024c; Wang et al., 2024l) or, less frequently, LLM-as-judge (Zhang et al., 2025b). Although these methods avoid human preference annotations, they often rely on trajectories sampled from a fixed policy model, which may not align well with the problem distribution. This misalignment leads to poor generalization, as highlighted by Zheng et al. (2024). 
Consequently, the next frontier in reward modeling will need to combine automated data collection with diverse data sources to achieve annotation-efficient generalization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 541, + 733 + ], + "type": "text", + "content": "While reasoning in LLM-as-judges is not explicitly addressed, recent training and inference techniques have drawn from established methods for improving reasoning. Judge-based assessment inherently involves a finite set of outcomes (e.g., A or B for pairwise judgments or 1-5 for single ratings), making it suitable for self-consistency decoding (Kim et al., 2024b). More advanced inference-time approaches, such as multi-judge or multi-round discussions (Li et al., 2023d; Chan et al., 2023; Verga et al., 2024; Yu et al., 2024d), self-rationalization (Trivedi et al., 2024), or sequential escalation (Jung et al., 2024), have been proposed. 
Concurrently, training-time solutions for LLM-as-judges focus on distilling chain-of-thought judgments from larger teacher models and fine-tuning smaller judges via supervised fine-tuning (Wang et al., 2023g; Li et al., 2023b; Kim et al., 2023; 2024c; Vu et al., 2024) or preference optimization (Hu et al., 2024; Wang et al.," + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 143 + ], + "type": "text", + "content": "2024f; Ye et al., 2024; Saad-Falcon et al., 2024; Deshpande et al., 2024; Wang et al., 2024j). Despite these advancements, such models still struggle in reasoning-intensive domains (Tan et al., 2024; Zhou et al., 2025b; Xu et al., 2025b), whereas stronger reasoning models have outperformed specialized judge models in more difficult evaluation settings (Xu et al., 2025a). In all, recent benchmarking results highlight that developing reasoning-specific judges remains an open and challenging research area." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 156, + 174, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 156, + 174, + 169 + ], + "spans": [ + { + "bbox": [ + 69, + 156, + 174, + 169 + ], + "type": "text", + "content": "6.2 Open Challenges" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 178, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 178, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 178, + 541, + 275 + ], + "type": "text", + "content": "Despite the trends observed in Section 6.1, several challenges remain. First, how can we effectively evaluate both the reasoning outcome and the reasoning chain? (Section 6.2.1). Second, do we truly understand reasoning? Does the reasoning chain generated by next-token sampling faithfully reflect the internal reasoning process of an LLM, or is it merely imitating its training data? (Section 6.2.2). Third, training of LLM reasoning system is still largely hindered by substantial data requirements, which include both more challenging questions and the corresponding outcome labels. This not only affects the end-to-end reasoner training, but also limits our exploration in building stronger reward models to facilitate inference time scaling (Section 6.2.3)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 288, + 204, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 288, + 204, + 300 + ], + "spans": [ + { + "bbox": [ + 69, + 288, + 204, + 300 + ], + "type": "text", + "content": "6.2.1 Evaluating Reasoning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 308, + 541, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 541, + 380 + ], + "type": "text", + "content": "As language models and agentic systems tackle increasingly complex tasks, evaluating their performance becomes equally challenging. Currently, progress in LLM reasoning is measured by outcome performance on fixed benchmarks (e.g., MATH (Hendrycks et al., 2021b)). However, relying solely on outcomes to verify reasoning correctness may be insufficient, as a correct final answer does not guarantee a logically sound reasoning chain (Hao et al., 2024a). Prior work has shown that LLMs often produce unfaithful reasoning chains, even when the final answers are correct (Wiegreffe et al., 2022; Lyu et al., 2023; Wang et al., 2023b)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 386, + 541, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 386, + 541, + 553 + ], + "spans": [ + { + "bbox": [ + 67, + 386, + 541, + 553 + ], + "type": "text", + "content": "Evaluating reasoning beyond outcomes remains an open and challenging problem. Early approaches relied on human annotators to assess the quality of generated explanations (Camburu et al., 2018; Rajani et al., 2019), focusing on whether the reasoning could lead to the same predictions. To scale this idea, follow-up works (Wiegreffe et al., 2020; Hase et al., 2020) used trained models as simulators to evaluate the alignment between generated reasoning and final predictions. 
When human-annotated reasoning chains are available, some studies leverage traditional NLG metrics to measure overlap between human- and model-generated explanations (Clinciu et al., 2021). Others propose reasoning-specific metrics to assess aspects like coherency, redundancy, factuality (Golovneva et al., 2022), informativeness (Chen et al., 2022), robustness (Wang & Zhao, 2024), and contextual faithfulness (Ming et al., 2025). Under the LLM-as-Judge paradigm, recent works prompt powerful LLMs like GPT-4 to directly evaluate reasoning chains generated by other models (Hao et al., 2024a; Sun et al., 2024b). However, as reasoning tasks grow in complexity, evaluation becomes increasingly difficult, even for frontier models—if a model cannot perform a task, how can it judge if the task is done correctly? Thus, developing robust and accurate methods to evaluate reasoning beyond outcomes remains a significant and unresolved challenge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 567, + 222, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 567, + 222, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 567, + 222, + 578 + ], + "type": "text", + "content": "6.2.2 Understanding Reasoning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 587, + 541, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 587, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 587, + 541, + 635 + ], + "type": "text", + "content": "Recent research on understanding LLM reasoning has advanced along two complementary paths: empirical studies that evaluate and analyze performance through carefully designed and controlled experiments, and formal analyses that introduce new frameworks to systematically explore the underlying mechanisms of how LLMs reason." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 648, + 539, + 733 + ], + "type": "text", + "content": "Empirical analysis of reasoning Recent LLMs exhibit strong performance across diverse tasks, suggesting some level of reasoning capability. However, whether these skills are general and transferable or merely specialized for tasks encountered during pretraining remains an open and debated question. To address this, several empirical studies have sought to understand and enhance LLM capabilities across various reasoning forms: abstractive reasoning (Wu et al., 2024a; He & Lu, 2024), compositional reasoning (Bhargava & Ng, 2022; Li et al., 2024g), inductive reasoning (Yang et al., 2024f; Han et al., 2024b), abductive reasoning (Jung et al., 2022; Pareschi, 2023), deductive reasoning (Poesia et al., 2024; Seals & Shalin, 2024; Feng et al.," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 178 + ], + "type": "text", + "content": "2024a), logical reasoning (Wan et al., 2024b; Han et al., 2024a; Xu et al., 2025c), commonsense reasoning (Lin et al., 2021; Liang et al., 2023a; Sun et al., 2024a), math reasoning (Ahn et al., 2024; Mirzadeh et al., 2025), and social reasoning (Gandhi et al., 2023). 
Notably, Arkoudas (2023) qualitatively evaluate GPT-4 on 21 diverse reasoning problems, concluding that despite occasional analytical success, GPT-4 remains incapable of true reasoning. Similarly, Wu et al. (2024a) empirically investigate abstractive reasoning and find that while LLMs achieve nontrivial performance on counterfactual tasks, their performance consistently degrades compared to default conditions, indicating reliance on narrow, non-transferable procedures. Mondorf & Plank (2024) provide a comprehensive survey on recent evaluations of LLM reasoning abilities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 71, + 183, + 541, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 183, + 541, + 388 + ], + "spans": [ + { + "bbox": [ + 71, + 183, + 541, + 388 + ], + "type": "text", + "content": "Beyond assessing LLM reasoning capabilities, there is growing interest in evaluating how test-time scaling methods enhance reasoning. The empirical success of CoT prompting has spurred extensive research into its mechanisms. Wang et al. (2023a) and Madaan et al. (2023a) investigate the role of demonstrations, finding that LLMs prioritize pattern consistency over accuracy and exhibit robustness to invalid demonstrations—particularly in mathematical reasoning, where incorrect equations often do not hinder performance. They also emphasize the importance of relevant rationales and logical progression in CoT prompts. Additionally, Madaan et al. (2023a) conclude that CoT aids models by supplementing missing information, such as commonsense knowledge, and reinforcing task understanding. From a modeling perspective, Dutta et al. (2024) analyze CoT through neural mechanisms, revealing that LLMs process input context and generated CoT via parallel pathways. 
They find that early layers (e.g., layers 1-16 in Llama-2 7B (Touvron et al., 2023)) rely on pretraining knowledge, while later layers specialize in in-context learning, with answer-writing heads emerging in the final layers. From a task perspective, Sprague et al. (2024a) conduct a meta-analysis of 100 CoT papers, showing that CoT significantly improves performance on mathematical, logical, and algorithmic reasoning tasks but offers minimal gains for non-symbolic tasks. Their analysis suggests that CoT excels in computational steps but struggles with tool-augmented reasoning. On the training front, Gao et al. (2024a); Zhang et al. (2025b); Yeo et al. (2025) explore key supervised fine-tuning (SFT) and reinforcement learning (RL) factors that optimize LLM training strategies for enhancing CoT reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 404, + 541, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 404, + 541, + 584 + ], + "spans": [ + { + "bbox": [ + 67, + 404, + 541, + 584 + ], + "type": "text", + "content": "Formal analysis of reasoning There is increasing interest in formal analyses, which use structured and logical proofs to systematically evaluate and improve the reasoning capabilities of LLMs. Han et al. (2022) introduce FOLIO, a dataset designed to assess models' ability to derive correct conclusions from premises using first-order logic reasoning. Similarly, Saparov & He (2023) develop a benchmark evaluating LLMs on symbolic ontologies, revealing that models often struggle with proof planning and rely on knowledge retrieval rather than genuine reasoning. These findings highlight the potential of neurosymbolic methods to better understand LLM reasoning. Recent work also explores formal analysis techniques to enhance LLM reasoning. For instance, Pan et al. 
(2023) use LLMs to translate natural language problems into symbolic formulations, which are then processed by deterministic symbolic solvers for inference. (Li et al., 2025b) demonstrate the promise of leveraging LLMs' symbolic reasoning for mathematical problem-solving. Other studies focus on domain-specific reasoning: Fang et al. (2024a) propose an LLM-based agent for text-based games, designed to tackle symbolic challenges and achieve in-game objectives, while Nahid & Rafiei (2024) introduce a framework to enhance LLMs' symbolic reasoning by normalizing web tables. These studies reveal LLMs' limitations in structured reasoning while emphasizing the value of integrating formal analysis to strengthen their capabilities." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 600, + 541, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 600, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 600, + 541, + 732 + ], + "type": "text", + "content": "Theoretical analysis of ICL and CoT reasoning The success of in-context learning (ICL) and CoT prompting in enhancing LLM reasoning has sparked significant interest in understanding their underlying mechanisms from theoretical perspectives. Extensive prior studies on ICL suggest that transformer-based in-context learners effectively implement various learning algorithms, encoding implicit, context-dependent models for generation within their hidden activations—models that can be trained through demonstrations as these activations are computed. For instance, Akyurek et al. (2022) investigate this hypothesis in the context of linear regression models, while Von Oswald et al. (2023) and Dai et al. (2023) explore how transformer-based in-context learners function as meta-optimizers, effectively learning models via gradient descent during their forward pass. From a Bayesian inference perspective, Xie et al. (2022); Zhang et al. (2023) and Wang et al. 
(2023e) demonstrate that transformer-based in-context learners can achieve the Bayes-optimal predictor when demonstrations are selected based on a shared latent concept variable, such" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 82, + 541, + 130 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 82, + 541, + 130 + ], + "spans": [ + { + "bbox": [ + 67, + 82, + 541, + 130 + ], + "type": "text", + "content": "as format or task information, even in the presence of distribution mismatches between demonstrations and training data. Additionally, Elhage et al. (2021); Olsson et al. (2022) examine ICL through the concept of \"induction heads\" - attention heads that implement a simple algorithm to complete tasks, providing evidence that induction heads may underlie much of the in-context learning observed in transformer-based models." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 136, + 541, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 136, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 136, + 541, + 304 + ], + "type": "text", + "content": "The body of work exploring the theoretical insights into CoT mechanisms remains relatively limited, with most studies focusing on the expressiveness of LLMs when using CoT. A pioneering study by Feng et al. (2023a) investigates LLMs with CoT for solving mathematical and decision-making problems. 
Using circuit complexity theory (Arora & Barak, 2009), they demonstrate that bounded-depth transformers cannot solve basic arithmetic or equation tasks unless the model size grows super-polynomially. In contrast, they prove that constant-size models can solve these tasks, along with a wide range of decision-making problems such as Dynamic Programming, by generating CoT derivations in a common mathematical language. Li et al. (2024h) extend these findings, providing a tighter upper bound on the expressiveness of constant-depth transformers with CoT. However, these studies do not explore how the length of a CoT affects model reasoning power. To address this gap, Merrill & Sabharwal (2024) find that a logarithmic number of intermediate steps (relative to input length) offers only marginal gains over standard transformers, while a linear number of steps under the assumption of projected pre-norm (a slight generalization of standard pre-norm) enables the recognition of all regular languages. Furthermore, polynomially many steps, combined with generalized pre-norm, allow transformers to recognize exactly the class of polynomial-time solvable problems." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 320, + 349, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 320, + 349, + 334 + ], + "spans": [ + { + "bbox": [ + 67, + 320, + 349, + 334 + ], + "type": "text", + "content": "6.2.3 Data Challenges in Advancing Reasoning Capabilities" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 342, + 541, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 541, + 440 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 541, + 440 + ], + "type": "text", + "content": "Challenges in scaling question and outcome supervision for RL As discussed earlier, development trends in both general and task-specific domains are converging, with a focus on employing end-to-end RL to minimize inductive bias and push the boundaries of intelligence. Frontier models now incorporate competition-level problems annually for training, as these represent the most challenging tasks and are annotated with high-quality answers by human experts. However, we are nearing the limits of available human-annotated data, raising the question of whether methods beyond human labeling can enable the continuous scaling of RL. This challenge is particularly relevant in domains where prompts are not easily verifiable, such as open-ended generation, software engineering, and most agentic tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 456, + 541, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 456, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 67, + 456, + 541, + 564 + ], + "type": "text", + "content": "Challenges in reward modeling Early studies have investigated the feasibility of process supervision (Lightman et al., 2024) and its effectiveness in inference-time scaling (Snell et al., 2025). 
However, its high annotation costs and ambiguous definition—particularly in long CoT scenarios where self-reflection is encouraged—have limited its adoption in large-scale reinforcement learning. Despite these challenges, the key advantage of accurate process supervision is its ability to reduce hallucinations, making it essential for automated reasoning and knowledge discovery. Additionally, as discussed in Section 4.2, the training paradigm for reward models is closely tied to that of reasoning models. This raises concerns about whether allocating the same annotation budget directly to reasoning models could lead to more stable and general improvements, potentially limiting the gains achievable through inference-time scaling." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 583, + 151, + 596 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 583, + 151, + 596 + ], + "spans": [ + { + "bbox": [ + 69, + 583, + 151, + 596 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 541, + 733 + ], + "type": "text", + "content": "In this work, we provide a timely and comprehensive survey on LLM reasoning. We first formalize the goal of LLM reasoning and consolidate past research by categorizing reasoning techniques along two dimensions: regimes and architectures. Within each of these dimensions, we review both input and output perspectives in detail. Our review highlights emerging trends, including the shift from inference-time scaling to learning-to-reason regimes, and the transition from standalone models to agentic systems. We also review and compare a wide range of learning algorithms, including supervised fine-tuning and reinforcement learning, as well as the training of reasoners and training of verifiers. 
Despite these advancements, challenges remain in evaluating reasoning and understanding real reasoning mechanisms as well as addressing data challenges in advancing reasoning capabilities. We encourage future research to further explore these trends, such as inference-aware learning-to-reason and automated multi-agent design, to enhance LLM reasoning." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 82, + 155, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 82, + 155, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 82, + 155, + 95 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 102, + 420, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 102, + 420, + 114 + ], + "spans": [ + { + "bbox": [ + 69, + 102, + 420, + 114 + ], + "type": "text", + "content": "We thank M Saiful Bari, Semih Yavuz and Yingbo Zhou for helpful discussions." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 129, + 132, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 129, + 132, + 141 + ], + "spans": [ + { + "bbox": [ + 69, + 129, + 132, + 141 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 148, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 70, + 148, + 539, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 148, + 539, + 172 + ], + "spans": [ + { + "bbox": [ + 70, + 148, + 539, + 172 + ], + "type": "text", + "content": "American invitational mathematics examination. Mathematical Association of America, 2025. https://maa.org/maa-invitational-competitions/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 178, + 541, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 178, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 70, + 178, + 541, + 215 + ], + "type": "text", + "content": "Rishabh Agarwal, Avi Singh, Lei Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, et al. Many-shot in-context learning. Advances in Neural Information Processing Systems, 37:76930-76966, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 221, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 221, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 70, + 221, + 541, + 281 + ], + "type": "text", + "content": "Sweta Agrawal, Chunting Zhou, Mike Lewis, Luke Zettlemoyer, and Marjan Ghazvininejad. In-context examples selection for machine translation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 8857-8873, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.564. 
URL https://aclanthology.org/2023-findings-acl.564/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 288, + 539, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 288, + 539, + 324 + ], + "spans": [ + { + "bbox": [ + 70, + 288, + 539, + 324 + ], + "type": "text", + "content": "Arash Ahmadian, Chris Cremer, Matthias Gallé, Marzieh Fadaee, Julia Kreutzer, Olivier Pietquin, Ahmet Üstün, and Sara Hooker. Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms. arXiv preprint arXiv:2402.14740, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 331, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 541, + 392 + ], + "type": "text", + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 397, + 539, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 397, + 539, + 434 + ], + "spans": [ + { + "bbox": [ + 70, + 397, + 539, + 434 + ], + "type": "text", + "content": "Afra Feyza Akyürek, Ekin Akyürek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. Rl4f: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv preprint arXiv:2305.08844, 2023." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 440, + 541, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 440, + 541, + 476 + ], + "spans": [ + { + "bbox": [ + 70, + 440, + 541, + 476 + ], + "type": "text", + "content": "Ekin Akyürek, Dale Schuurmans, Jacob Andreas, Tengyu Ma, and Denny Zhou. What learning algorithm is in-context learning? investigations with linear models. In The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 483, + 541, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 483, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 70, + 483, + 541, + 518 + ], + "type": "text", + "content": "AlphaProof and AlphaGeometry teams. AI achieves silver-medal standard solving international mathematical olympiad problems. https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 525, + 435, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 525, + 435, + 538 + ], + "spans": [ + { + "bbox": [ + 70, + 525, + 435, + 538 + ], + "type": "text", + "content": "Konstantine Arkoudas. Gpt-4 can't reason. arXiv preprint arXiv:2308.03762, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 544, + 541, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 544, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 541, + 567 + ], + "type": "text", + "content": "Sanjeev Arora and Boaz Barak. Computational complexity: a modern approach. Cambridge University Press, 2009." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 574, + 541, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 574, + 541, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 574, + 541, + 647 + ], + "type": "text", + "content": "Krishna Aswani, Huilin Lu, Pranav Patankar, Priya Dhalwani, Xue Tan, Jayant Ganeshmohan, and Simon Lacasse. Auto-evolve: Enhancing large language model's performance via self-reasoning framework. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 13243-13257, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.774. URL https://aclanthology.org/2024-findings-emnlp.774/." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "spans": [ + { + "bbox": [ + 70, + 653, + 541, + 690 + ], + "type": "text", + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "text", + "content": "Mohammad Gheshlaghi Azar, Zhaohan Daniel Guo, Bilal Piot, Remi Munos, Mark Rowland, Michal Valko, and Daniele Calandriello. A general theoretical paradigm to understand learning from human preferences. In International Conference on Artificial Intelligence and Statistics, pp. 4447-4455. PMLR, 2024." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "type": "text", + "content": "Zhangir Azerbayev, Hailey Schoelkopf, Keiran Paster, Marco Dos Santos, Stephen Marcus McAleer, Albert Q Jiang, Jia Deng, Stella Biderman, and Sean Welleck. LLemma: An open language model for mathematics. In International Conference on Learning Representations (ICLR), 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 127, + 541, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 166 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 166 + ], + "type": "text", + "content": "Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 174, + 541, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 174, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 70, + 174, + 541, + 222 + ], + "type": "text", + "content": "Ananth Balashankar, Ziteng Sun, Jonathan Berant, Jacob Eisenstein, Michael Collins, Adrian Hutter, Jong Lee, Chirag Nagpal, Flavien Prost, Aradhana Sinha, Ananda Theertha Suresh, and Ahmad Beirami. Infalign: Inference-aware language model alignment. CoRR, abs/2412.19792, 2024. doi: 10.48550/ARXIV.2412.19792. URL https://doi.org/10.48550/arXiv.2412.19792." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 232, + 541, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 232, + 541, + 270 + ], + "spans": [ + { + "bbox": [ + 70, + 232, + 541, + 270 + ], + "type": "text", + "content": "Bruno Barras, Samuel Boutin, Cristina Cornes, Judicael Courant, Jean-Christophe Filliatre, Eduardo Gimenez, Hugo Herbelin, Gerard Huet, Cesar Munoz, Chetan Murthy, et al. The Coq proof assistant reference manual: Version 6.1. PhD thesis, Inria, 1997." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 278, + 541, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 278, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 70, + 278, + 541, + 316 + ], + "type": "text", + "content": "Richard Bellman. Dynamic programming and stochastic control processes. Information and Control, 1 (3):228-239, 1958. ISSN 0019-9958. doi: https://doi.org/10.1016/S0019-9958(58)80003-0. URL https://www.sciencedirect.com/science/article/pii/S0019995858800030." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 324, + 541, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 324, + 541, + 361 + ], + "spans": [ + { + "bbox": [ + 70, + 324, + 541, + 361 + ], + "type": "text", + "content": "Bespoke Labs. 
Bespoke-stratos: The unreasonable effectiveness of reasoning distillation. www.bespokelabs.ai/blog/bespoke-stratos-the-unreasonable-effectiveness-of-reasoning-distillation, 2025. Accessed: 2025-01-22." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 370, + 541, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 370, + 541, + 419 + ], + "spans": [ + { + "bbox": [ + 70, + 370, + 541, + 419 + ], + "type": "text", + "content": "Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Michal Podstawski, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Hubert Niewiadomski, Piotr Nczyk, et al. Graph of thoughts: Solving elaborate problems with large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 17682-17690, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 428, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 428, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 428, + 541, + 466 + ], + "type": "text", + "content": "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, et al. Reasoning language models: A blueprint. arXiv preprint arXiv:2501.11223, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 475, + 541, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 475, + 541, + 512 + ], + "spans": [ + { + "bbox": [ + 70, + 475, + 541, + 512 + ], + "type": "text", + "content": "Prajjwal Bhargava and Vincent Ng. Commonsense knowledge reasoning and generation with pre-trained language models: A survey. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 12317-12325, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 521, + 541, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 541, + 559 + ], + "type": "text", + "content": "Zhenni Bi, Kai Han, Chuanjian Liu, Yehui Tang, and Yunhe Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024. URL https://arxiv.org/pdf/2412.09078." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "type": "text", + "content": "Vadim Borisov, Kathrin Sessler, Tobias Leemann, Martin Pawelczyk, and Gjergji Kasneci. Language models are realistic tabular data generators. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=cEygmmQNOeI." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 613, + 541, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 613, + 541, + 651 + ], + "spans": [ + { + "bbox": [ + 70, + 613, + 541, + 651 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 659, + 541, + 732 + ], + "type": "text", + "content": "Tom B. 
Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners, 2020. URL https://arxiv.org/abs/2005.14165." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 69, + 81, + 541, + 118 + ], + "type": "text", + "content": "Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-snli: Natural language inference with natural language explanations. Advances in Neural Information Processing Systems, 31, 2018." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "type": "text", + "content": "Lorenzo Canese, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, Marco Re, and Sergio Spanò. 
Multi-agent reinforcement learning: A review of challenges and applications. Applied Sciences, 11(11):4948, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 172, + 541, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 172, + 541, + 235 + ], + "spans": [ + { + "bbox": [ + 70, + 172, + 541, + 235 + ], + "type": "text", + "content": "Yihan Cao, Shuyi Chen, Ryan Liu, Zhiruo Wang, and Daniel Fried. API-assisted code generation for question answering on varied table structures. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 14536-14548, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.897. URL https://aclanthology.org/2023.emnlp-main.897/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 241, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 241, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 69, + 241, + 541, + 281 + ], + "type": "text", + "content": "Stephen Casper, Xander Davies, Claudia Shi, Thomas Krendl Gilbert, Jérémy Scheurer, Javier Rando, Rachel Freedman, Tomasz Korbak, David Lindner, Pedro Freire, et al. Open problems and fundamental limitations of reinforcement learning from human feedback. arXiv preprint arXiv:2307.15217, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 286, + 541, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 286, + 541, + 324 + ], + "spans": [ + { + "bbox": [ + 70, + 286, + 541, + 324 + ], + "type": "text", + "content": "Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. Chateval: Towards better llm-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 331, + 541, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 541, + 358 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 541, + 358 + ], + "type": "text", + "content": "Edward Y Chang. Socrasynth: Multi-llm reasoning with conditional statistics. arXiv preprint arXiv:2402.06634, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 365, + 541, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 365, + 541, + 402 + ], + "spans": [ + { + "bbox": [ + 70, + 365, + 541, + 402 + ], + "type": "text", + "content": "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. Fireact: Toward language agent fine-tuning. CoRR, abs/2310.05915, 2023a. doi: 10.48550/ARXIV.2310.05915. URL https://doi.org/10.48550/arXiv.2310.05915." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 410, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 541, + 449 + ], + "type": "text", + "content": "Bei Chen, Fengji Zhang, Anh Nguyen, Daoguang Zan, Zeqi Lin, Jian-Guang Lou, and Weizhu Chen. Codet: Code generation with generated tests. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=ktrw68Cmu9c." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 455, + 541, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 541, + 482 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 541, + 482 + ], + "type": "text", + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: process supervision without process. arXiv preprint arXiv:2405.03553, 2024a." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 488, + 541, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 488, + 541, + 526 + ], + "spans": [ + { + "bbox": [ + 70, + 488, + 541, + 526 + ], + "type": "text", + "content": "Hanjie Chen, Faeze Brahman, Xiang Ren, Yangfeng Ji, Yejin Choi, and Swabha Swayamdipta. Information-theoretic evaluation of free-text rationales with conditional " + }, + { + "bbox": [ + 70, + 488, + 541, + 526 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 70, + 488, + 541, + 526 + ], + "type": "text", + "content": "-information. In Workshop on Trustworthy and Socially Responsible Machine Learning, NeurIPS 2022, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 533, + 541, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 533, + 541, + 561 + ], + "spans": [ + { + "bbox": [ + 69, + 533, + 541, + 561 + ], + "type": "text", + "content": "Justin Chih-Yao Chen, Swarnadeep Saha, and Mohit Bansal. Reconcile: Round-table conference improves reasoning via consensus among diverse llms. arXiv preprint arXiv:2309.13007, 2023c." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 567, + 541, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 567, + 541, + 604 + ], + "spans": [ + { + "bbox": [ + 69, + 567, + 541, + 604 + ], + "type": "text", + "content": "Justin Chih-Yao Chen, Archiki Prasad, Swarnadeep Saha, Elias Stengel-Eskin, and Mohit Bansal. Magicore: Multi-agent, iterative, coarse-to-fine refinement for reasoning, 2024b. URL https://arxiv.org/abs/2409.12147." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 612, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 612, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 69, + 612, + 541, + 734 + ], + "type": "text", + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. Evaluating large language models trained on code, 2021." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "text", + "content": "Pei Chen, Boran Han, and Shuai Zhang. Comm: Collaborative multi-agent, multi-reasoning-path prompting for complex problem solving. arXiv preprint arXiv:2404.17729, 2024c." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 115, + 541, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 115, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 70, + 115, + 541, + 177 + ], + "type": "text", + "content": "Wei-Lin Chen, Cheng-Kuang Wu, Yun-Nung Chen, and Hsin-Hsi Chen. Self-ICL: Zero-shot in-context learning with self-generated demonstrations. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 15651–15662, Singapore, December 2023d. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.968. URL https://aclanthology.org/2023.emnlp-main.968/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 186, + 541, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 186, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 186, + 541, + 236 + ], + "type": "text", + "content": "Wenhu Chen. 
Large language models are few(1)-shot table reasoners. In Andreas Vlachos and Isabelle Augenstein (eds.), Findings of the Association for Computational Linguistics: EACL 2023, pp. 1120-1130, Dubrovnik, Croatia, May 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-eacl.83. URL https://aclanthology.org/2023-findings-eacl.83/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "type": "text", + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W. Cohen. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. Transactions on Machine Learning Research, 2023e. ISSN 2835-8856. URL https://openreview.net/forum?id=YfZ4ZPt8zd." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 290, + 541, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 290, + 541, + 327 + ], + "spans": [ + { + "bbox": [ + 70, + 290, + 541, + 327 + ], + "type": "text", + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for " + }, + { + "bbox": [ + 70, + 290, + 541, + 327 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 70, + 290, + 541, + 327 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024d." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 336, + 541, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 336, + 541, + 408 + ], + "spans": [ + { + "bbox": [ + 70, + 336, + 541, + 408 + ], + "type": "text", + "content": "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. 
Agent-flan: Designing data and methods of effective agent tuning for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics, ACL 2024, Bangkok, Thailand and virtual meeting, August 11-16, 2024, pp. 9354-9366. Association for Computational Linguistics, 2024e. URL https://doi.org/10.18653/v1/2024-findings-acl.557." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "type": "text", + "content": "Zihan Chen, Song Wang, Zhen Tan, Jundong Li, and Cong Shen. Maple: Many-shot adaptive pseudo-labeling for in-context learning, 2025. URL https://arxiv.org/abs/2505.16225." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 453, + 541, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 453, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 70, + 453, + 541, + 502 + ], + "type": "text", + "content": "Zhoujun Cheng, Tianbao Xie, Peng Shi, Chengzu Li, Rahul Nadkarni, Yushi Hu, Caiming Xiong, Dragomir Radev, Mari Ostendorf, Luke Zettlemoyer, Noah A. Smith, and Tao Yu. Binding language models in symbolic languages. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1H1PV42cbF." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 510, + 541, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 510, + 541, + 559 + ], + "spans": [ + { + "bbox": [ + 70, + 510, + 541, + 559 + ], + "type": "text", + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. CoRR, abs/2412.15287, 2024. 
doi: 10.48550/ARXIV.2412.15287. URL https://doi.org/10.48550/arXiv.2412.15287." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 568, + 541, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 568, + 541, + 594 + ], + "spans": [ + { + "bbox": [ + 69, + 568, + 541, + 594 + ], + "type": "text", + "content": "Miruna Clinciu, Arash Eshghi, and Helen Hastie. A study of automatic metrics for the evaluation of natural language explanations. arXiv preprint arXiv:2103.08545, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 603, + 541, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 603, + 541, + 640 + ], + "spans": [ + { + "bbox": [ + 70, + 603, + 541, + 640 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 649, + 541, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 649, + 541, + 675 + ], + "spans": [ + { + "bbox": [ + 69, + 649, + 541, + 675 + ], + "type": "text", + "content": "Jonathan Cook, Tim Rocktäschel, Jakob Foerster, Dennis Aumiller, and Alex Wang. Ticking all the boxes: Generated checklists improve llm evaluation and generation. arXiv preprint arXiv:2410.03608, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 684, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 684, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 684, + 541, + 732 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Chris Cummins, Volker Seeker, Dejan Grubisic, Mostafa Elhoushi, Youwei Liang, Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Kim Hazelwood, Gabriel Synnaeve, et al. Large language models for compiler optimization. arXiv preprint arXiv:2309.07062, 2023. URL https://arxiv.org/abs/2309.07062." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 124, + 541, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 124, + 541, + 186 + ], + "spans": [ + { + "bbox": [ + 70, + 124, + 541, + 186 + ], + "type": "text", + "content": "Damai Dai, Yutao Sun, Li Dong, Yaru Hao, Shuming Ma, Zhifang Sui, and Furu Wei. Why can GPT learn in context? language models secretly perform gradient descent as meta-optimizers. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 4005–4019, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.247. URL https://aclanthology.org/2023-findings-acl.247/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 191, + 541, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 191, + 541, + 229 + ], + "spans": [ + { + "bbox": [ + 70, + 191, + 541, + 229 + ], + "type": "text", + "content": "Ning Dai, Zheng Wu, Renjie Zheng, Ziyun Wei, Wenlei Shi, Xing Jin, Guanlin Liu, Chen Dun, Liang Huang, and Lin Yan. Process supervision-guided policy optimization for code generation. arXiv preprint arXiv:2410.17621, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 235, + 541, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 235, + 541, + 274 + ], + "spans": [ + { + "bbox": [ + 70, + 235, + 541, + 274 + ], + "type": "text", + "content": "Mehul Damani, Idan Shenfeld, Andi Peng, Andreea Bobu, and Jacob Andreas. Learning how hard to think: Input-adaptive allocation of LM computation. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=6qUUgw9bAZ." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 278, + 541, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 278, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 70, + 278, + 541, + 304 + ], + "type": "text", + "content": "Debrup Das, Debopriyo Banerjee, Somak Aditya, and Ashish Kulkarni. Mathsensei: A tool-augmented large language model for mathematical reasoning. arXiv preprint arXiv:2402.17231, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 309, + 541, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 309, + 541, + 358 + ], + "spans": [ + { + "bbox": [ + 70, + 309, + 541, + 358 + ], + "type": "text", + "content": "Leonardo De Moura, Soonho Kong, Jeremy Avigad, Floris Van Doorn, and Jakob von Raumer. The lean theorem prover (system description). In _Automated Deduction-CADE-25: 25th International Conference on Automated Deduction_, Berlin, Germany, August 1-7, 2015, Proceedings 25, pp. 378-388. Springer, 2015." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 365, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 365, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 70, + 365, + 541, + 689 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. 
Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yuan Ou, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yunfan Xiong, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou Y. X. Zhu Yanhong Xu Yanping Huang Yaohui Li Yi ZhengYuchen ZhuYunxian Ma Ying Tang Yukun Zha Yuting YanZ.Z.Ren Zehui Ren,Zhangli ShaZhe FuZhean XuZhenda Xie Zhengyan Zhang,Zhenwen Hao,Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang.Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "text", + "content": "Shumin Deng, Ningyu Zhang, Nay Oo, and Bryan Hooi. Towards a unified view of answer calibration for multi-step reasoning. In Bhavana Dalvi Mishra, Greg Durrett, Peter Jansen, Ben Lipkin, Danilo Neves Ribeiro, Lionel Wong, Xi Ye, and Wenting Zhao (eds.), Proceedings of the 2nd Workshop on Natural" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 542, + 734 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 78, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 78, + 81, + 541, + 108 + ], + "type": "text", + "content": "Language Reasoning and Structured Explanations (@ACL 2024), pp. 25-38, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. URL https://aclanthology.org/2024.nlrse-1.3/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 114, + 542, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 542, + 142 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 542, + 142 + ], + "type": "text", + "content": "Yihe Deng, Weitong Zhang, Zixiang Chen, and Quanquan Gu. Rephrase and respond: Let large language models ask better questions for themselves. 
arXiv preprint arXiv:2311.04205, 2023a." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "type": "text", + "content": "Yuntian Deng, Kiran Prasad, Roland Fernandez, Paul Smolensky, Vishrav Chaudhary, and Stuart M. Shieber. Implicit chain of thought reasoning via knowledge distillation. CoRR, abs/2311.01460, 2023b." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 180, + 541, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 180, + 541, + 209 + ], + "spans": [ + { + "bbox": [ + 69, + 180, + 541, + 209 + ], + "type": "text", + "content": "Yuntian Deng, Yejin Choi, and Stuart M. Shieber. From explicit cot to implicit cot: Learning to internalize cot step by step. CoRR, abs/2405.14838, 2024b. URL https://doi.org/10.48550/arXiv.2405.14838." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 213, + 541, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 541, + 253 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 541, + 253 + ], + "type": "text", + "content": "Darshan Deshpande, Selvan Sunitha Ravi, Sky CH-Wang, Bartosz Mielczarek, Anand Kannappan, and Rebecca Qian. Glider: Grading llm interactions and decisions using explainable ranking. arXiv preprint arXiv:2412.14140, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 259, + 541, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 259, + 541, + 299 + ], + "spans": [ + { + "bbox": [ + 70, + 259, + 541, + 299 + ], + "type": "text", + "content": "Hanze Dong, Wei Xiong, Deepanshu Goyal, Yihan Zhang, Winnie Chow, Rui Pan, Shizhe Diao, Jipeng Zhang, Kashun Shum, and Tong Zhang. RAFT: reward ranked finetuning for generative foundation model alignment. Trans. Mach. Learn. Res., 2023, 2023. 
URL https://openreview.net/forum?id=m7p507zb1Y." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 304, + 541, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 304, + 541, + 366 + ], + "spans": [ + { + "bbox": [ + 70, + 304, + 541, + 366 + ], + "type": "text", + "content": "Haoyu Dong and Zhiruo Wang. Large language models for tabular data: Progresses and future directions. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, pp. 2997-3000, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 97898400704314. doi: 10.1145/3626772.3661384. URL https://doi.org/10.1145/3626772.3661384." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 373, + 541, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 541, + 412 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 541, + 412 + ], + "type": "text", + "content": "Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Jingyuan Ma, Rui Li, Heming Xia, Jingjing Xu, Zhiyong Wu, Baobao Chang, et al. A survey on in-context learning. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 1107-1128, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 418, + 541, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 418, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 70, + 418, + 541, + 469 + ], + "type": "text", + "content": "Jiri Dostál. Theory of problem solving. Procedia - Social and Behavioral Sciences, 174:2798-2805, 2015. ISSN 1877-0428. doi: https://doi.org/10.1016/j.sbspro.2015.01.970. URL https://www.sciencedirect.com/science/article/pii/S1877042815010290. International Conference on New Horizons in Education, INTE 2014, 25-27 June 2014, Paris, France." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 476, + 541, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 476, + 541, + 538 + ], + "spans": [ + { + "bbox": [ + 70, + 476, + 541, + 538 + ], + "type": "text", + "content": "Shihan Dou, Yan Liu, Haoxiang Jia, Limao Xiong, Enyu Zhou, Wei Shen, Junjie Shan, Caishuang Huang, Xiao Wang, Xiaoran Fan, Zhiheng Xi, Yuhao Zhou, Tao Ji, Rui Zheng, Qi Zhang, Xuanjing Huang, and Tao Gui. Stepcoder: Improve code generation with reinforcement learning from compiler feedback. CoRR, abs/2402.01391, 2024a. doi: 10.48550/ARXIV.2402.01391. URL https://doi.org/10.48550/arXiv.2402.01391." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 544, + 541, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 544, + 541, + 608 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 541, + 608 + ], + "type": "text", + "content": "Zi-Yi Dou, Cheng-Fu Yang, Xueqing Wu, Kai-Wei Chang, and Nanyun Peng. Re-ReST: Reflection-reinforced self-training for language agents. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 15394-15411, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.861. URL https://aclanthology.org/2024.emnlp-main.861/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 614, + 541, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 614, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 70, + 614, + 541, + 677 + ], + "type": "text", + "content": "Dheeru Dua, Shivanshu Gupta, Sameer Singh, and Matt Gardner. Successive prompting for decomposing complex questions. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 
1251-1265, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.81. URL https://aclanthology.org/2022.emnlp-main.81." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 683, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 683, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 683, + 541, + 734 + ], + "type": "text", + "content": "Jinhao Duan, Shiqi Wang, James Diffenderfer, Lichao Sun, Tianlong Chen, Bhavya Kailkhura, and Kaidi Xu. Reta: Recursively thinking ahead to improve the strategic reasoning of large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 2232-2246, 2024." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "text", + "content": "Tom Duenas and Diana Ruiz. The path to superintelligence: A critical analysis of openai's five levels of ai progression. Research Gate, 2024." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 112, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 112, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 70, + 112, + 541, + 152 + ], + "type": "text", + "content": "Subhabrata Dutta, Joykirat Singh, Soumen Chakrabarti, and Tanmoy Chakraborty. How to think step-by-step: A mechanistic understanding of chain-of-thought reasoning. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=uHLDkQVtyC." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 156, + 541, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 156, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 70, + 156, + 541, + 194 + ], + "type": "text", + "content": "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 1(1):12, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 198, + 541, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 198, + 541, + 226 + ], + "spans": [ + { + "bbox": [ + 70, + 198, + 541, + 226 + ], + "type": "text", + "content": "Andrew Estornell, Jean-Francois Ton, Yuanshun Yao, and Yang Liu. Acc-debate: An actor-critic approach to multi-agent debate. arXiv preprint arXiv:2411.00053, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 230, + 541, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 230, + 541, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 230, + 541, + 257 + ], + "type": "text", + "content": "Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 261, + 541, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 261, + 541, + 311 + ], + "spans": [ + { + "bbox": [ + 70, + 261, + 541, + 311 + ], + "type": "text", + "content": "Meng Fang, Shilong Deng, Yudi Zhang, Zijing Shi, Ling Chen, Mykola Pechenizkiy, and Jun Wang. Large language models are neurosymbolic reasoners. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17985-17993, Mar. 2024a. doi: 10.1609/aaai.v38i16.29754. URL https://ojs.aaai.org/index.php/AAAI/article/view/29754." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 316, + 541, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 316, + 541, + 366 + ], + "spans": [ + { + "bbox": [ + 70, + 316, + 541, + 366 + ], + "type": "text", + "content": "Xi Fang, Weijie Xu, Fiona Anting Tan, Ziqing Hu, Jiani Zhang, Yanjun Qi, Srinivasan H. Sengamedu, and Christos Faloutsos. Large language models (LLMs) on tabular data: Prediction, generation, and understanding - a survey. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=IZnrCGF9WI." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 370, + 541, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 370, + 541, + 409 + ], + "spans": [ + { + "bbox": [ + 69, + 370, + 541, + 409 + ], + "type": "text", + "content": "Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. Towards revealing the mystery behind chain of thought: a theoretical perspective. Advances in Neural Information Processing Systems, 36:70757-70798, 2023a." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 414, + 541, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 414, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 414, + 541, + 475 + ], + "type": "text", + "content": "Jiazhan Feng, Ruochen Xu, Junheng Hao, Hiteshi Sharma, Yelong Shen, Dongyan Zhao, and Weizhu Chen. Language models can be deductive solvers. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Findings of the Association for Computational Linguistics: NAACL 2024, pp. 4026-4042, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-nacl.254. URL https://aclanthology.org/2024 findings-nacl.254/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 481, + 541, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 481, + 541, + 520 + ], + "spans": [ + { + "bbox": [ + 70, + 481, + 541, + 520 + ], + "type": "text", + "content": "Xiachong Feng, Longxu Dou, Ella Li, Qinghao Wang, Haochuan Wang, Yu Guo, Chang Ma, and Lingpeng Kong. A survey on large language model-based social agents in game-theoretic scenarios, 2024b. URL https://arxiv.org/abs/2412.03920." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 524, + 541, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 524, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 524, + 541, + 563 + ], + "type": "text", + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179, 2023b." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "spans": [ + { + "bbox": [ + 70, + 567, + 541, + 605 + ], + "type": "text", + "content": "Chrisantha Fernando, Dylan Sunil Banarse, Henryk Michalewski, Simon Osindero, and Tim Rocktäschel. Promptbreeder: Self-referential self-improvement via prompt evolution. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=9ZxnPZGmPU." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 609, + 541, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 609, + 541, + 648 + ], + "spans": [ + { + "bbox": [ + 70, + 609, + 541, + 648 + ], + "type": "text", + "content": "Emily First, Markus N Rabe, Talia Ringer, and Yuriy Brun. Baldur: Whole-proof generation and repair with large language models. In Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, pp. 1229-1241, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 652, + 541, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 652, + 541, + 692 + ], + "spans": [ + { + "bbox": [ + 70, + 652, + 541, + 692 + ], + "type": "text", + "content": "Yann Fleureau, Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, and Kashif Rasul. How NuminaMath won the 1st AIMO Progress Prize. https://huggingface.co/blog/winning-aimo-progress-prize, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 696, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 734 + ], + "type": "text", + "content": "Adam Fourney, Gagan Bansal, Hussein Mozannar, Cheng Tan, Eduardo Salinas, Friederike Niedtner, Grace Proebsting, Griffin Bassman, Jack Gerrits, Jacob Alber, et al. Magentic-one: A generalist multi-agent system for solving complex tasks. arXiv preprint arXiv:2411.04468, 2024." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 312, + 760 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "text", + "content": "Adrian Garret Gabriel, Alaa Alameer Ahmad, and Shankar Kumar Jeyakumar. Advancing agentic systems: Dynamic task decomposition, tool integration and evaluation using novel metrics and dataset, 2024. URL https://arxiv.org/abs/2410.22457." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 124, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 124, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 124, + 541, + 175 + ], + "type": "text", + "content": "Kanishk Gandhi, Jan-Philipp Franken, Tobias Gerstenberg, and Noah Goodman. 
Understanding social reasoning in language models with language models. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=8bqjirgxQM." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "type": "text", + "content": "Deep Ganguli, Liane Lovitt, Jackson Kernion, Amanda Askell, Yuntao Bai, Saurav Kadavath, Ben Mann, Ethan Perez, Nicholas Schiefer, Kamal Ndousse, et al. Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv preprint arXiv:2209.07858, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 222, + 541, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 222, + 541, + 250 + ], + "spans": [ + { + "bbox": [ + 70, + 222, + 541, + 250 + ], + "type": "text", + "content": "Jiaxuan Gao, Shusheng Xu, Wenjie Ye, Weilin Liu, Chuyi He, Wei Fu, Zhiyu Mei, Guangju Wang, and Yi Wu. On designing effective rl reward at training time for llm reasoning. arXiv preprint arXiv:2410.15115, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 253, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 253, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 253, + 541, + 280 + ], + "type": "text", + "content": "Peizhong Gao, Ao Xie, Shaoguang Mao, Wenshan Wu, Yan Xia, Haipeng Mi, and Furu Wei. Meta reasoning for large language models. arXiv preprint arXiv:2406.11698, 2024b." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 284, + 541, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 284, + 541, + 322 + ], + "spans": [ + { + "bbox": [ + 70, + 284, + 541, + 322 + ], + "type": "text", + "content": "Olga Golovneva, Moya Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Roscoe: A suite of metrics for scoring step-by-step reasoning. arXiv preprint arXiv:2212.07919, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 327, + 541, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 327, + 541, + 376 + ], + "spans": [ + { + "bbox": [ + 70, + 327, + 541, + 376 + ], + "type": "text", + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014. URL https://proceedings.neurips.cc/paper_files/paper/2014/file/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 381, + 541, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 381, + 541, + 432 + ], + "spans": [ + { + "bbox": [ + 70, + 381, + 541, + 432 + ], + "type": "text", + "content": "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. Tora: A tool-integrated reasoning agent for mathematical problem solving. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Ep0TjVoap." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "type": "text", + "content": "Sachin Goyal, Ziwei Ji, Ankit Singh Rawat, Aditya Krishna Menon, Sanjiv Kumar, and Vaishnavh Nagarajan. Think before you speak: Training language models with pause tokens. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=ph04CRkPdC." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 480, + 541, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 480, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 541, + 518 + ], + "type": "text", + "content": "Nate Gruver, Marc Anton Finzi, Shikai Qiu, and Andrew Gordon Wilson. Large language models are zero-shot time series forecasters. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=md68e8iZK1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 523, + 541, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 523, + 541, + 550 + ], + "spans": [ + { + "bbox": [ + 69, + 523, + 541, + 550 + ], + "type": "text", + "content": "Zhengyao Gu, Henry Peng Zou, Yankai Chen, Aiwei Liu, Weizhi Zhang, and Philip S Yu. Semi-supervised in-context learning: A baseline study. arXiv preprint arXiv:2503.03062, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 554, + 541, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 554, + 541, + 592 + ], + "spans": [ + { + "bbox": [ + 70, + 554, + 541, + 592 + ], + "type": "text", + "content": "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. 
arXiv preprint arXiv: 2501.04519, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 597, + 541, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 597, + 541, + 634 + ], + "spans": [ + { + "bbox": [ + 70, + 597, + 541, + 634 + ], + "type": "text", + "content": "Jiaxian Guo, Bo Yang, Paul Yoo, Bill Yuchen Lin, Yusuke Iwasawa, and Yutaka Matsuo. Suspicion-agent: Playing imperfect information games with theory of mind aware gpt-4. arXiv preprint arXiv:2309.17277, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 639, + 541, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 639, + 541, + 690 + ], + "spans": [ + { + "bbox": [ + 70, + 639, + 541, + 690 + ], + "type": "text", + "content": "Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=ZG3RaNIs08." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "text", + "content": "Taicheng Guo, Xiuying Chen, Yaqi Wang, Ruidi Chang, Shichao Pei, Nitesh V Chawla, Olaf Wiest, and Xiangliang Zhang. Large language model based multi-agents: A survey of progress and challenges. arXiv preprint arXiv:2402.01680, 2024b." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 120 + ], + "type": "text", + "content": "Zakaria Hammane, Fatima-Ezzahraa Ben-Bouazza, and Abdelhadi Fennan. Selfrewarddrag: Enhancing medical reasoning with retrieval-augmented generation and self-evaluation in large language models. In 2024 International Conference on Intelligent Systems and Computer Vision (ISCV), pp. 1-8. IEEE, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 127, + 541, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 165 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 165 + ], + "type": "text", + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, et al. Folio: Natural language reasoning with first-order logic. arXiv preprint arXiv:2209.00840, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 172, + 541, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 172, + 541, + 246 + ], + "spans": [ + { + "bbox": [ + 70, + 172, + 541, + 246 + ], + "type": "text", + "content": "Simeng Han, Aaron Yu, Rui Shen, Zhenting Qi, Martin Riddell, Wenfei Zhou, Yujie Qiao, Yilun Zhao, Semih Yavuz, Ye Liu, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Dragomir Radev, Rex Ying, and Arman Cohen. P-FOLIO: Evaluating and improving logical reasoning with abundant human-written reasoning chains. In Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 16553-16565, Miami, Florida, USA, November 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-emnlp.966. URL https://aclanthology.org/2024-findings-emnlp.966/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 254, + 541, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 254, + 541, + 279 + ], + "spans": [ + { + "bbox": [ + 70, + 254, + 541, + 279 + ], + "type": "text", + "content": "Simon Jerome Han, Keith J Ransom, Andrew Perfors, and Charles Kemp. Inductive reasoning in humans and large language models. Cognitive Systems Research, 83:101155, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 287, + 541, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 287, + 541, + 349 + ], + "spans": [ + { + "bbox": [ + 70, + 287, + 541, + 349 + ], + "type": "text", + "content": "Shibo Hao, Yi Gu, Haodi Ma, Joshua Jiahua Hong, Zhen Wang, Daisy Zhe Wang, and Zhiting Hu. Reasoning with language model is planning with world model. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 8154-8173. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.507. 
URL https://doi.org/10.18653/v1/2023.emnlp-main.507." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 356, + 541, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 356, + 541, + 394 + ], + "spans": [ + { + "bbox": [ + 70, + 356, + 541, + 394 + ], + "type": "text", + "content": "Shibo Hao, Yi Gu, Haotian Luo, Tianyang Liu, Xiyuan Shao, Xinyuan Wang, Shuhua Xie, Haodi Ma, Adithya Samavedhi, Qiyue Gao, et al. Llm reasoners: New evaluation, library, and analysis of step-by-step reasoning with large language models. arXiv preprint arXiv:2404.05221, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 402, + 541, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 402, + 541, + 438 + ], + "spans": [ + { + "bbox": [ + 70, + 402, + 541, + 438 + ], + "type": "text", + "content": "Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. Training large language models to reason in a continuous latent space. CoRR, abs/2412.06769, 2024b. URL https://doi.org/10.48550/arXiv.2412.06769." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 447, + 541, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 447, + 541, + 483 + ], + "spans": [ + { + "bbox": [ + 70, + 447, + 541, + 483 + ], + "type": "text", + "content": "Peter Hase, Shiyue Zhang, Harry Xie, and Mohit Bansal. Leakage-adjusted simulatability: Can models generate non-trivial explanations of their behavior in natural language? arXiv preprint arXiv:2010.04119, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 492, + 541, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 492, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 70, + 492, + 541, + 518 + ], + "type": "text", + "content": "Michael Hassid, Tal Remez, Jonas Gehring, Roy Schwartz, and Yossi Adi. The larger the better? 
improved llm code-generation via budget reallocation. arXiv preprint arXiv:2404.00725, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 526, + 541, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 526, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 70, + 526, + 541, + 563 + ], + "type": "text", + "content": "Alex Havrilla, Yuqing Du, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, Sainbayar Sukhbaatar, and Roberta Raileanu. Teaching large language models to reason with reinforcement learning. arXiv preprint arXiv:2403.04642, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 571, + 541, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 571, + 541, + 609 + ], + "spans": [ + { + "bbox": [ + 70, + 571, + 541, + 609 + ], + "type": "text", + "content": "Jiabang He, Lei Wang, Yi Hu, Ning Liu, Hui Liu, Xing Xu, and Heng Tao Shen. Icl-d3ie: In-context learning with diverse demonstrations updating for document information extraction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 19485-19494, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 616, + 541, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 541, + 643 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 541, + 643 + ], + "type": "text", + "content": "Jinwei He and Feng Lu. Causejudger: Identifying the cause with llms for abductive logical reasoning. arXiv preprint arXiv:2409.05559, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "type": "text", + "content": "Dan Hendrycks, Steven Basart, Saurav Kadavath, Mantas Mazeika, Akul Arora, Ethan Guo, Collin Burns, Samir Puranik, Horace He, Dawn Song, and Jacob Steinhardt. Measuring coding challenge competence with apps. NeurIPS, 2021a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021b." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "text", + "content": "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. TaPas: Weakly supervised table parsing via pre-training. 
In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 4320-4333, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.398. URL https://aclanthology.org/2020.acl-main.398/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 148, + 541, + 175 + ], + "type": "text", + "content": "Keith J Holyoak. Analogy and relational reasoning. The Oxford handbook of thinking and reasoning, pp. 234-259, 2012. URL https://psycnet.apa.org/record/2012-08871-013." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 179, + 541, + 217 + ], + "type": "text", + "content": "Jiwoo Hong, Noah Lee, and James Thorne. Orpo: Monolithic preference optimization without reference model. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 11170-11189, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 222, + 541, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 222, + 541, + 259 + ], + "spans": [ + { + "bbox": [ + 70, + 222, + 541, + 259 + ], + "type": "text", + "content": "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352, 3(4):6, 2023." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 265, + 541, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 265, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 70, + 265, + 541, + 291 + ], + "type": "text", + "content": "Xinyi Hou, Yanjie Zhao, Shenao Wang, and Haoyu Wang. Model context protocol (mcp): Landscape, security threats, and future research directions. arXiv preprint arXiv:2503.23278, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 296, + 541, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 296, + 541, + 332 + ], + "spans": [ + { + "bbox": [ + 70, + 296, + 541, + 332 + ], + "type": "text", + "content": "Shengran Hu, Cong Lu, and Jeff Clune. Automated design of agentic systems. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=t9U3LW7JVX." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 338, + 541, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 338, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 70, + 338, + 541, + 365 + ], + "type": "text", + "content": "Xinyu Hu, Li Lin, Mingqi Gao, Xunjian Yin, and Xiaojun Wan. Themis: A reference-free nlg evaluation language model with flexibility and interpretability. arXiv preprint arXiv:2406.18365, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 370, + 541, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 370, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 70, + 370, + 541, + 430 + ], + "type": "text", + "content": "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 1049–1065, Toronto, Canada, July 2023. Association for Computational Linguistics. 
doi: 10.18653/v1/2023.findings-acl.67. URL https://aclanthology.org/2023-findings-acl.67/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 437, + 541, + 475 + ], + "type": "text", + "content": "Jie Huang, Xinyun Chen, Swaroop Mishra, Huaixiu Steven Zheng, Adams Wei Yu, Xinying Song, and Denny Zhou. Large language models cannot self-correct reasoning yet. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=IkmD3fKBPQ." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 480, + 541, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 480, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 541, + 540 + ], + "type": "text", + "content": "Siming Huang, Tianhao Cheng, J. K. Liu, Jiaran Hao, Liuyihan Song, Yang Xu, J. Yang, J. H. Liu, Chenchen Zhang, Linzheng Chai, Ruifeng Yuan, Zhaoxiang Zhang, Jie Fu, Qian Liu, Ge Zhang, Zili Wang, Yuan Qi, Yinghui Xu, and Wei Chu. Opencoder: The open cookbook for top-tier code large language models. CoRR, abs/2411.04905, 2024b. doi: 10.48550/ARXIV.2411.04905. URL https://doi.org/10.48550/arXiv.2411.04905." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 547, + 541, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 547, + 541, + 585 + ], + "spans": [ + { + "bbox": [ + 70, + 547, + 541, + 585 + ], + "type": "text", + "content": "Yuncheng Huang, Qianyu He, Yipei Xu, Jiaqing Liang, and Yanghua Xiao. Laying the foundation first? investigating the generalization from atomic skills to complex reasoning tasks, 2024c. URL https:// arxiv.org/abs/2403.09479." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 590, + 541, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 590, + 541, + 628 + ], + "spans": [ + { + "bbox": [ + 70, + 590, + 541, + 628 + ], + "type": "text", + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489, 2024d." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 633, + 541, + 659 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 633, + 541, + 659 + ], + "spans": [ + { + "bbox": [ + 70, + 633, + 541, + 659 + ], + "type": "text", + "content": "Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 664, + 541, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 664, + 541, + 690 + ], + "spans": [ + { + "bbox": [ + 69, + 664, + 541, + 690 + ], + "type": "text", + "content": "Michael Huth and Mark Ryan. Logic in computer science: Modelling and reasoning about systems. Cambridge university press., 86, 2004." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "text", + "content": "Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. Livocodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "text", + "content": "Harold Jeffreys. An invariant form for the prior probability in estimation problems. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, 186:453-461, 1946. doi: 10.1098/rspa.1946.0056. URL http://doi.org/10.1098/rspa.1946.0056." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "type": "text", + "content": "Albert Q. Jiang, Wenda Li, and Mateja Jamnik. Multi-language diversity benefits autoformalization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=2jjfRm2R6D." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "type": "text", + "content": "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. 
StructGPT: A general framework for large language model to reason over structured data. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 9237-9251, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.574. URL https://aclanthology.org/2023.emnlp-main.574/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 238, + 541, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 238, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 70, + 238, + 541, + 275 + ], + "type": "text", + "content": "Jinhao Jiang, Jiayi Chen, Junyi Li, Ruiyang Ren, Shijie Wang, Wayne Xin Zhao, Yang Song, and Tao Zhang. Rag-star: Enhancing deliberative reasoning with retrieval augmented verification and refinement. arXiv preprint arXiv:2412.12881, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 282, + 541, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 282, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 70, + 282, + 541, + 342 + ], + "type": "text", + "content": "Fangkai Jiao, Chengwei Qin, Zhengyuan Liu, Nancy Chen, and Shafiq Joty. Learning planning-based reasoning by trajectories collection and process reward synthesizing. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 334-350. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.20." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 350, + 541, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 350, + 541, + 411 + ], + "spans": [ + { + "bbox": [ + 70, + 350, + 541, + 411 + ], + "type": "text", + "content": "Fangkai Jiao, Zhiyang Teng, Bosheng Ding, Zhengyuan Liu, Nancy F. 
Chen, and Shafiq Joty. Exploring self-supervised logic-enhanced training for large language models. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), NAACL 2024, Mexico City, Mexico, June 16-21, 2024, pp. 926-941. Association for Computational Linguistics, 2024b." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 541, + 444 + ], + "type": "text", + "content": "Fangkai Jiao, Geyang Guo, Xingxing Zhang, Nancy F. Chen, Shafiq Joty, and Furu Wei. Preference optimization for reasoning with pseudo feedback. In ICLR. OpenReview.net, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 451, + 541, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 451, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 70, + 451, + 541, + 499 + ], + "type": "text", + "content": "Carlos E. Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik R. Narasimhan. Swe-bench: Can language models resolve real-world github issues? In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=VTF8yNQM66." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 507, + 541, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 507, + 541, + 580 + ], + "spans": [ + { + "bbox": [ + 70, + 507, + 541, + 580 + ], + "type": "text", + "content": "Jaehun Jung, Lianhui Qin, Sean Welleck, Faeze Brahman, Chandra Bhagavatula, Ronan Le Bras, and Yejin Choi. Maieutic prompting: Logically consistent reasoning with recursive explanations. 
In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 1266-1279, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.82. URL https://aclanthology.org/2022.emnlp-main.82/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 587, + 541, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 587, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 587, + 541, + 613 + ], + "type": "text", + "content": "Jaehun Jung, Faeze Brahman, and Yejin Choi. Trust or escalate: Llm judges with provable guarantees for human agreement. arXiv preprint arXiv:2407.18370, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 620, + 541, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 620, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 70, + 620, + 541, + 656 + ], + "type": "text", + "content": "Katie Kang, Amrith Setlur, Dibya Ghosh, Jacob Steinhardt, Claire Tomlin, Sergey Levine, and Aviral Kumar. What do learning dynamics reveal about generalization in llm reasoning?, 2024. URL https://arxiv.org/abs/2411.07681." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 663, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 663, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 663, + 541, + 689 + ], + "type": "text", + "content": "Sayash Kapoor, Benedikt Stroebl, Zachary S Siegel, Nitya Nadgir, and Arvind Narayanan. Ai agents that matter. arXiv preprint arXiv:2407.01502, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "type": "text", + "content": "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. Vineppo: Unlocking rl potential for llm reasoning through refined credit assignment. arXiv preprint arXiv:2410.01679, 2024." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 69, + 81, + 541, + 108 + ], + "type": "text", + "content": "Zixuan Ke and Bing Liu. Continual learning of natural language processing tasks: A survey, 2023. URL https://arxiv.org/abs/2211.12701." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 114, + 541, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 114, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 69, + 114, + 541, + 140 + ], + "type": "text", + "content": "Zixuan Ke, Yijia Shao, Haowei Lin, Tatsuya Konishi, Gyuhak Kim, and Bing Liu. Continual pre-training of language models, 2023. URL https://arxiv.org/abs/2302.03241." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 147, + 541, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 147, + 541, + 174 + ], + "spans": [ + { + "bbox": [ + 69, + 147, + 541, + 174 + ], + "type": "text", + "content": "Zixuan Ke, Weize Kong, Cheng Li, Mingyang Zhang, Qiaozhu Mei, and Michael Bendersky. Bridging the preference gap between retrievers and llms, 2024. URL https://arxiv.org/abs/2401.06954." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 180, + 541, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 180, + 541, + 254 + ], + "spans": [ + { + "bbox": [ + 70, + 180, + 541, + 254 + ], + "type": "text", + "content": "Zixuan Ke, Yifei Ming, and Shafiq Joty. Adaptation of large language models. In Maria Lomeli, Swabha Swayamdipta, and Rui Zhang (eds.), Proceedings of the 2025 Annual Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 5: Tutorial Abstracts), pp. 30-37, Albuquerque, New Mexico, May 2025a. Association for Computational Linguistics. ISBN 979-8-89176-193-3. doi: 10.18653/v1/2025.naacl-tutorial.5. URL https://aclanthology.org/2025.naacl-tutorial.5/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 261, + 541, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 261, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 69, + 261, + 541, + 287 + ], + "type": "text", + "content": "Zixuan Ke, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Demystifying domain-adaptive post-training for financial llms. arXiv preprint arXiv:2501.04961, 2025b." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 293, + 541, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 293, + 541, + 320 + ], + "spans": [ + { + "bbox": [ + 69, + 293, + 541, + 320 + ], + "type": "text", + "content": "Zixuan Ke, Austin Xu, Yifei Ming, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Mas-zero: Designing multi-agent systems with zero supervision, 2025c. URL https://arxiv.org/abs/2505.14996." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 327, + 541, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 541, + 365 + ], + "type": "text", + "content": "Omar Khattab, Keshav Santhanam, Xiang Lisa Li, David Hall, Percy Liang, Christopher Potts, and Matei Zaharia. Demonstrate-search-predict: Composing retrieval and language models for knowledge-intensive nlp. arXiv preprint arXiv:2212.14024, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 372, + 541, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 372, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 70, + 372, + 541, + 422 + ], + "type": "text", + "content": "Tushar Khot, Harsh Trivedi, Matthew Finlayson, Yao Fu, Kyle Richardson, Peter Clark, and Ashish Sabharwal. Decomposed prompting: A modular approach for solving complex tasks. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=nGgzQjzaRy." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 429, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 429, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 69, + 429, + 541, + 466 + ], + "type": "text", + "content": "Dongkwan Kim, Junho Myung, and Alice Oh. Salad-bowl-LLM: Multi-culture LLMs by in-context demonstrations from diverse cultures. 
In Workshop on Socially Responsible Language Modelling Research, 2024a. URL https://openreview.net/forum?id=KsAfPGPZZn." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 474, + 541, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 474, + 541, + 511 + ], + "spans": [ + { + "bbox": [ + 69, + 474, + 541, + 511 + ], + "type": "text", + "content": "Seungone Kim, Jamin Shin, Yejin Cho, Joel Jang, Shayne Longpre, Hwaran Lee, Sangdoo Yun, Seongjin Shin, Sungdong Kim, James Thorne, et al. Prometheus: Inducing fine-grained evaluation capability in language models. In The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 518, + 541, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 518, + 541, + 556 + ], + "spans": [ + { + "bbox": [ + 69, + 518, + 541, + 556 + ], + "type": "text", + "content": "Seungone Kim, Juyoung Suk, Ji Yong Cho, Shayne Longpre, Chaeun Kim, Dongkeun Yoon, Guijin Son, Yejin Cho, Sheikh Shafayat, Jinheon Baek, et al. The biggen bench: A principled benchmark for fine-grained evaluation of language models with language models. arXiv preprint arXiv:2406.05761, 2024b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 563, + 541, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 563, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 563, + 541, + 601 + ], + "type": "text", + "content": "Seungone Kim, Juyoung Suk, Shayne Longpre, Bill Yuchen Lin, Jamin Shin, Sean Welleck, Graham Neubig, Moontae Lee, Kyungjae Lee, and Minjoon Seo. Prometheus 2: An open source language model specialized in evaluating other language models. arXiv preprint arXiv:2405.01535, 2024c." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 608, + 541, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 608, + 541, + 646 + ], + "spans": [ + { + "bbox": [ + 69, + 608, + 541, + 646 + ], + "type": "text", + "content": "Sunghwan Kim, Dongjin Kang, Taeyoon Kwon, Hyungjoo Chae, Jungsoo Won, Dongha Lee, and Jinyoung Yeo. Evaluating robustness of reward models for mathematical reasoning, 2024d. URL https://arxiv.org/abs/2410.01729." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 653, + 541, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 653, + 541, + 680 + ], + "spans": [ + { + "bbox": [ + 69, + 653, + 541, + 680 + ], + "type": "text", + "content": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35:22199-22213, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 686, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 686, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 686, + 541, + 700 + ], + "type": "text", + "content": "Wouter Kool, Herke van Hoof, and Max Welling. Buy 4 reinforce samples, get a baseline for free! 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "type": "text", + "content": "Michal Kosinski. Evaluating large language models in theory of mind tasks. Proceedings of the National Academy of Sciences, 121(45):e2405460121, 2024." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "text", + "content": "Julia Kreutzer, Artem Sokolov, and Stefan Riezler. Bandit structured prediction for neural sequence-to-sequence learning. arXiv preprint arXiv:1704.06497, 2017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 114, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 541, + 175 + ], + "type": "text", + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D. Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M. Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal M. P. Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. CoRR, abs/2409.12917, 2024. doi: 10.48550/ARXIV.2409.12917. URL https://doi.org/10.48550/arXiv.2409.12917." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 182, + 541, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 541, + 220 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 541, + 220 + ], + "type": "text", + "content": "Xin Lai, Zhuotao Tian, Yukang Chen, Senqiao Yang, Xiangru Peng, and Jiaya Jia. Step-dpo: Step-wise preference optimization for long-chain reasoning of llms. CoRR, abs/2406.18629, 2024. doi: 10.48550/ ARXIV.2406.18629. URL https://doi.org/10.48550/arXiv.2406.18629." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 227, + 541, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 227, + 541, + 288 + ], + "spans": [ + { + "bbox": [ + 70, + 227, + 541, + 288 + ], + "type": "text", + "content": "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tülu 3: Pushing frontiers in open language model post-training. 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 295, + 541, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 295, + 541, + 320 + ], + "spans": [ + { + "bbox": [ + 69, + 295, + 541, + 320 + ], + "type": "text", + "content": "Qiangfeng Peter Lau, Mong-Li Lee, and Wynne Hsu. Coordination guided reinforcement learning. In AAMAS, pp. 215-222, 2012." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 327, + 541, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 327, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 70, + 327, + 541, + 365 + ], + "type": "text", + "content": "Harrison Lee, Samrat Phatale, Hassan Mansoor, Kellie Ren Lu, Thomas Mesnard, Johan Ferret, Colton Bishop, Ethan Hall, Victor Carbune, and Abhinav Rastogi. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 372, + 541, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 372, + 541, + 409 + ], + "spans": [ + { + "bbox": [ + 70, + 372, + 541, + 409 + ], + "type": "text", + "content": "Sangmin Lee, Minzhi Li, Bolin Lai, Wenqi Jia, Fiona Ryan, Xu Cao, Ozgur Kara, Bikram Boote, Weiyan Shi, Diyi Yang, et al. Towards social ai: A survey on understanding social interactions. arXiv preprint arXiv:2409.15316, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 416, + 541, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 416, + 541, + 478 + ], + "spans": [ + { + "bbox": [ + 70, + 416, + 541, + 478 + ], + "type": "text", + "content": "Itay Levy, Ben Boin, and Jonathan Berant. Diverse demonstrations improve in-context compositional generalization. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1401-1422, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.78. URL https://aclanthology.org/2023.acl-long.78/." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 485, + 541, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 485, + 541, + 522 + ], + "spans": [ + { + "bbox": [ + 70, + 485, + 541, + 522 + ], + "type": "text", + "content": "Chengshu Li, Jacky Liang, Andy Zeng, Xinyun Chen, Karol Hausman, Dorsa Sadigh, Sergey Levine, Li Fei-Fei, Fei Xia, and Brian Ichter. Chain of code: Reasoning with a language model-augmented code emulator. arXiv preprint arXiv:2312.04474, 2023a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 529, + 541, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 529, + 541, + 590 + ], + "spans": [ + { + "bbox": [ + 70, + 529, + 541, + 590 + ], + "type": "text", + "content": "Haoran Li, Qingxiu Dong, Zhengyang Tang, Chaojun Wang, Xingxing Zhang, Haoyang Huang, Shaohan Huang, Xiaolong Huang, Zeqiang Huang, Dongdong Zhang, Yuxian Gu, Xin Cheng, Xun Wang, Si-Qing Chen, Li Dong, Wei Lu, Zhifang Sui, Benyou Wang, Wai Lam, and Furu Wei. Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064, 2024a. URL https://doi.org/10.48550/arXiv.2402.13064." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 597, + 541, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 597, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 597, + 541, + 624 + ], + "type": "text", + "content": "Junlong Li, Shichao Sun, Weizhe Yuan, Run-Ze Fan, Hai Zhao, and Pengfei Liu. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470, 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 630, + 541, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 630, + 541, + 669 + ], + "spans": [ + { + "bbox": [ + 70, + 630, + 541, + 669 + ], + "type": "text", + "content": "Junyou Li, Qin Zhang, Yangbin Yu, Qiang Fu, and Deheng Ye. 
More agents is all you need. Transactions on Machine Learning Research, 2024b. ISSN 2835-8856. URL https://openreview.net/forum?id=bgzUSZ8aeg." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "type": "text", + "content": "Ming Li, Jiuhai Chen, Lichang Chen, and Tianyi Zhou. Can llms speak for diverse people? tuning llms via debate to generate controllable controversial statements. arXiv preprint arXiv:2402.10614, 2024c." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 707, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 707, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 69, + 707, + 541, + 733 + ], + "type": "text", + "content": "Minzhi Li, Weiyan Shi, Caleb Ziems, and Diyi Yang. Social intelligence data infrastructure: Structuring the present and navigating the future. arXiv preprint arXiv:2403.14659, 2024d." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Minzhi Li, Zhengyuan Liu, Shumin Deng, Shafiq Joty, Nancy Chen, and Min-Yen Kan. 
Dna-eval: Enhancing large language model evaluation through decomposition and aggregation. In Proceedings of the 31st International Conference on Computational Linguistics, pp. 2277-2290, 2025a." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "text", + "content": "Mukai Li, Shansan Gong, Jiangtao Feng, Yiheng Xu, Jun Zhang, Zhiyong Wu, and Lingpeng Kong. Incontext learning with many demonstration examples. arXiv preprint arXiv:2302.04931, 2023c." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 157, + 541, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 157, + 541, + 183 + ], + "spans": [ + { + "bbox": [ + 70, + 157, + 541, + 183 + ], + "type": "text", + "content": "Ruosen Li, Teerth Patel, and Xinya Du. Prd: Peer rank and discussion improve large language model based evaluations. arXiv preprint arXiv:2307.02762, 2023d." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 190, + 541, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 190, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 70, + 190, + 541, + 215 + ], + "type": "text", + "content": "Sheng Li, Jayesh K Gupta, Peter Morales, Ross Allen, and Mykel J Kochenderfer. Deep implicit coordination graphs for multi-agent reinforcement learning. arXiv preprint arXiv:2006.11438, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 221, + 541, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 221, + 541, + 293 + ], + "spans": [ + { + "bbox": [ + 70, + 221, + 541, + 293 + ], + "type": "text", + "content": "Xiaonan Li, Kai Lv, Hang Yan, Tianyang Lin, Wei Zhu, Yuan Ni, Guotong Xie, Xiaoling Wang, and Xipeng Qiu. Unified demonstration retriever for in-context learning. 
In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4644-4668, Toronto, Canada, July 2023e. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.256. URL https://aclanthology.org/2023.acl-long.256/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 300, + 541, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 300, + 541, + 338 + ], + "spans": [ + { + "bbox": [ + 70, + 300, + 541, + 338 + ], + "type": "text", + "content": "Xingxuan Li, Ruochen Zhao, Yew Ken Chia, Bosheng Ding, Shafiq Joty, Soujanya Poria, and Lidong Bing. Chain-of-knowledge: Grounding large language models via dynamic knowledge adapting over heterogeneous sources, 2024e. URL https://arxiv.org/abs/2305.13269." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 345, + 541, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 345, + 541, + 383 + ], + "spans": [ + { + "bbox": [ + 70, + 345, + 541, + 383 + ], + "type": "text", + "content": "Yang Li, Wenhao Zhang, Jianhong Wang, Shao Zhang, Yali Du, Ying Wen, and Wei Pan. Aligning individual and collective objectives in multi-agent cooperation. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024f. URL https://openreview.net/forum?id=2YSHEBRRol." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 388, + 541, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 388, + 541, + 473 + ], + "spans": [ + { + "bbox": [ + 70, + 388, + 541, + 473 + ], + "type": "text", + "content": "Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, Rémi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal Lago, Thomas Hubert, Peter Choy, Cyprien de Masson d'Autume, Igor Babuschkin, Xinyun Chen, Po-Sen Huang, Johannes Welbl, Sven Gowal, Alexey Cherepanov, James Molloy, Daniel J. Mankowitz, Esme Sutherland Robson, Pushmeet Kohli, Nando de Freitas, Koray Kavukcuoglu, and Oriol Vinyals. Competition-level code generation with alphabet. Science, 378(6624):1092-1097, December 2022. ISSN 1095-9203. doi: 10.1126/science.abq1158. URL http://dx.doi.org/10.1126/science.abq1158." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 480, + 541, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 480, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 70, + 480, + 541, + 529 + ], + "type": "text", + "content": "Zenan Li, Zhaoyu Li, Wen Tang, Xian Zhang, Yuan Yao, Xujie Si, Fan Yang, Kaiyu Yang, and Xiaoxing Ma. Proving olympiad inequalities by synergizing LLMs and symbolic reasoning. In The Thirteenth International Conference on Learning Representations, 2025b. URL https://openreview.net/forum?id=FiyS0ecSm0." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 536, + 541, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 536, + 541, + 597 + ], + "spans": [ + { + "bbox": [ + 70, + 536, + 541, + 597 + ], + "type": "text", + "content": "Zhaoyi Li, Gangwei Jiang, Hong Xie, Linqi Song, Defu Lian, and Ying Wei. Understanding and patching compositional reasoning in LLMs. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 
9668-9688, Bangkok, Thailand, August 2024g. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.576. URL https://aclanthology.org/2024-findings-acl.576/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 604, + 541, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 604, + 541, + 641 + ], + "spans": [ + { + "bbox": [ + 69, + 604, + 541, + 641 + ], + "type": "text", + "content": "Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. Chain of thought empowers transformers to solve inherently serial problems. In The Twelfth International Conference on Learning Representations, 2024h. URL https://openreview.net/forum?id=3EWTEy9MTM." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 647, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 647, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 647, + 541, + 733 + ], + "type": "text", + "content": "Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Alexander Cosgrove, Christopher D Manning, Christopher Re, Diana Acosta-Navas, Drew Arad Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue WANG, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri S. 
Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Andrew Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang," + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 78, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 78, + 81, + 541, + 118 + ], + "type": "text", + "content": "Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. Holistic evaluation of language models. Transactions on Machine Learning Research, 2023a. ISSN 2835-8856. URL https://openreview.net/forum?id=i04LZibEqW. Featured Certification, Expert Certification." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 163 + ], + "type": "text", + "content": "Tian Liang, Zhiwei He, Wenxiang Jiao, Xing Wang, Yan Wang, Rui Wang, Yujiu Yang, Shuming Shi, and Zhaopeng Tu. Encouraging divergent thinking in large language models through multi-agent debate. arXiv preprint arXiv:2305.19118, 2023b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 170, + 541, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 541, + 206 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 541, + 206 + ], + "type": "text", + "content": "Yancheng Liang, Daphne Chen, Abhishek Gupta, Simon Shaolei Du, and Natasha Jaques. Learning to cooperate with humans using generative agents. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=v4dXL3LsGX." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 213, + 541, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 541, + 262 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=v8LOpN6EOi." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 270, + 541, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 270, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 70, + 270, + 541, + 342 + ], + "type": "text", + "content": "Bill Yuchen Lin, Seyeon Lee, Xiaoyang Qiao, and Xiang Ren. Common sense beyond English: Evaluating and improving multilingual language models for commonsense reasoning. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 1274-1287, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.acl-long.102. 
URL https://aclanthology.org/2021.acl-long.102/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 349, + 541, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 349, + 541, + 410 + ], + "spans": [ + { + "bbox": [ + 70, + 349, + 541, + 410 + ], + "type": "text", + "content": "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. An inner table retriever for robust table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9909–9926, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.551. URL https://aclanthology.org/2023.acl-long.551/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 417, + 541, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 417, + 541, + 490 + ], + "spans": [ + { + "bbox": [ + 70, + 417, + 541, + 490 + ], + "type": "text", + "content": "Weizhe Lin, Rexhina Blloshmi, Bill Byrne, Adria de Gispert, and Gonzalo Iglesias. LI-RAGE: Late interaction retrieval augmented generation with explicit signals for open-domain table question answering. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 1557-1566, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-short.133. URL https://aclanthology.org/2023.acl-short.133/." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 496, + 541, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 496, + 541, + 532 + ], + "spans": [ + { + "bbox": [ + 70, + 496, + 541, + 532 + ], + "type": "text", + "content": "Chris Yuhao Liu, Liang Zeng, Jiacai Liu, Rui Yan, Jujie He, Chaojie Wang, Shuicheng Yan, Yang Liu, and Yahui Zhou. Skywork-reward: Bag of tricks for reward modeling in llms. arXiv preprint arXiv:2410.18451, 2024a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 540, + 539, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 540, + 539, + 566 + ], + "spans": [ + { + "bbox": [ + 69, + 540, + 539, + 566 + ], + "type": "text", + "content": "Hanmeng Liu, Zhizhang Fu, Mengru Ding, Ruoxi Ning, Chaoli Zhang, Xiaozhang Liu, and Yue Zhang. Logical reasoning in large language models: A survey. arXiv preprint arXiv:2502.09100, 2025a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 572, + 541, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 572, + 541, + 644 + ], + "spans": [ + { + "bbox": [ + 70, + 572, + 541, + 644 + ], + "type": "text", + "content": "Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. What makes good in-context examples for GPT-3? In Eneko Agirre, Marianna Apidianaki, and Ivan Vulić (eds.), Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pp. 100–114, Dublin, Ireland and Online, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.deelio-1.10. URL https://aclanthology.org/2022.deelio-1.10/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 651, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 651, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 651, + 541, + 689 + ], + "type": "text", + "content": "Liang Liu, Dong Zhang, Shoushan Li, Guodong Zhou, and Erik Cambria. Two heads are better than one: Zero-shot cognitive reasoning via multi-llm knowledge fusion. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, pp. 1462–1472, 2024b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 696, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 696, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 69, + 696, + 541, + 732 + ], + "type": "text", + "content": "Ryan Liu, Jiayi Geng, Addison J. Wu, Ilia Sucholutsky, Tania Lombrozo, and Thomas L. Griffiths. Mind your step (by step): Chain-of-thought can reduce performance on tasks where thinking makes humans worse, 2024c. URL https://arxiv.org/abs/2410.21333." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "type": "text", + "content": "Tianyang Liu, Fei Wang, and Muhao Chen. Rethinking tabular data understanding with large language models. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 450-482, Mexico City, Mexico, June 2024d. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.26. URL https://aclanthology.org/2024.naacl-long.26/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "spans": [ + { + "bbox": [ + 70, + 162, + 541, + 201 + ], + "type": "text", + "content": "Tongxuan Liu, Xingyu Wang, Weizhe Huang, Wenjiang Xu, Yuting Zeng, Lei Jiang, Hailong Yang, and Jing Li. Groupdebate: Enhancing the efficiency of multi-agent debate using group discussion. arXiv preprint arXiv:2409.14051, 2024e." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 208, + 541, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 208, + 541, + 246 + ], + "spans": [ + { + "bbox": [ + 70, + 208, + 541, + 246 + ], + "type": "text", + "content": "Yanchen Liu, Srishti Gautam, Jiaqi Ma, and Himabindu Lakkaraju. Investigating the fairness of large language models for predictions on tabular data. In *Socially Responsible Language Modelling Research*, 2023. URL https://openreview.net/forum?id=V1740FqidS." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 252, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 252, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 252, + 541, + 280 + ], + "type": "text", + "content": "Yantao Liu, Zijun Yao, Rui Min, Yixin Cao, Lei Hou, and Juanzi Li. Pairwise rm: Perform best-of-n sampling with knockout tournament. arXiv preprint arXiv:2501.13007, 2025b." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 286, + 541, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 286, + 541, + 335 + ], + "spans": [ + { + "bbox": [ + 70, + 286, + 541, + 335 + ], + "type": "text", + "content": "Zhihan Liu, Hao Hu, Shenao Zhang, Hongyi Guo, Shuqi Ke, Boyi Liu, and Zhaoran Wang. Reason for future, act for now: A principled architecture for autonomous LLM agents. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 31186-31261. PMLR, 21-27 Jul 2024f." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 342, + 541, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 342, + 541, + 370 + ], + "spans": [ + { + "bbox": [ + 69, + 342, + 541, + 370 + ], + "type": "text", + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Tianyu Pang, Chao Du, and Min Lin. There may not be a hah moment in r1-zero-like training — a pilot study. https://oatllm.notion.site/oat-zero, 2025c. Notion Blog." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 376, + 541, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 376, + 541, + 415 + ], + "spans": [ + { + "bbox": [ + 70, + 376, + 541, + 415 + ], + "type": "text", + "content": "Do Xuan Long, Hai Nguyen Ngoc, Tiviatis Sim, Hieu Dao, Shafiq Joty, Kenji Kawaguchi, Nancy F Chen, and Min-Yen Kan. Llms are biased towards output formats! systematically evaluating and mitigating output format bias of llms. arXiv preprint arXiv:2408.08656, 2024a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 421, + 541, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 421, + 541, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 421, + 541, + 495 + ], + "type": "text", + "content": "Do Xuan Long, Duong Ngoc Yen, Anh Tuan Luu, Kenji Kawaguchi, Min-Yen Kan, and Nancy F. Chen. 
Multi-expert prompting improves reliability, safety and usefulness of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 20370-20401, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.1135. URL https://aclanthology.org/2024.emnlp-main.1135/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 502, + 541, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 502, + 541, + 576 + ], + "spans": [ + { + "bbox": [ + 70, + 502, + 541, + 576 + ], + "type": "text", + "content": "Do Xuan Long, Yiran Zhao, Hannah Brown, Yuxi Xie, James Zhao, Nancy Chen, Kenji Kawaguchi, Michael Shieh, and Junxian He. Prompt optimization via adversarial in-context learning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7308-7327, Bangkok, Thailand, August 2024c. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.395. URL https://aclanthology.org/2024.acl-long.395/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 583, + 541, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 583, + 541, + 622 + ], + "spans": [ + { + "bbox": [ + 70, + 583, + 541, + 622 + ], + "type": "text", + "content": "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36, 2024a." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 628, + 541, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 628, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 70, + 628, + 541, + 656 + ], + "type": "text", + "content": "Weizheng Lu, Jing Zhang, Ju Fan, Zihao Fu, Yueguo Chen, and Xiaoyong Du. Large language model for table processing: A survey. Frontiers of Computer Science, 19(2):192350, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "type": "text", + "content": "Xinyuan Lu, Liangming Pan, Yubo Ma, Preslav Nakov, and Min-Yen Kan. Tart: An open-source tool-augmented framework for explainable table-based reasoning. arXiv preprint arXiv:2409.11724, 2024b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "text", + "content": "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. arXiv preprint arXiv:2308.09583, 2023a." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "text", + "content": "Haipeng Luo, Qingfeng Sun, Can Xu, Pu Zhao, Jianguang Lou, Chongyang Tao, Xiubo Geng, Qingwei Lin, Shifeng Chen, and Dongmei Zhang. Wizardmath: Empowering mathematical reasoning for large language models via reinforced evol-instruct. CoRR, abs/2308.09583, 2023b. doi: 10.48550/ARXIV.2308.09583. URL https://doi.org/10.48550/arXiv.2308.09583." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "spans": [ + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "type": "text", + "content": "Kangyang Luo, Zichen Ding, Zhenmin Weng, Lingfeng Qiao, Meng Zhao, Xiang Li, Di Yin, and Jinlong Shu. Let's be self-generated via step by step: A curriculum learning approach to automated reasoning with large language models. arXiv preprint arXiv:2410.21728, 2024a. URL https://arxiv.org/abs/2410.21728." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 185, + 541, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 185, + 541, + 233 + ], + "spans": [ + { + "bbox": [ + 70, + 185, + 541, + 233 + ], + "type": "text", + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. CoRR, abs/2406.06592, 2024b. doi: 10.48550/ARXIV.2406.06592. URL https://doi.org/10.48550/arXiv.2406.06592." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 243, + 541, + 280 + ], + "type": "text", + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, et al. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592, 2024c." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 289, + 541, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 289, + 541, + 325 + ], + "spans": [ + { + "bbox": [ + 70, + 289, + 541, + 325 + ], + "type": "text", + "content": "Man Luo, Xin Xu, Zhuyun Dai, Panupong Pasupat, Mehran Kazemi, Chitta Baral, Vaiva Imbrasaite, and Vincent Y Zhao. Dr. icl: Demonstration-retrieved in-context learning. arXiv preprint arXiv:2305.14128, 2023c." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 335, + 541, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 541, + 371 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 541, + 371 + ], + "type": "text", + "content": "Man Luo, Xin Xu, Yue Liu, Panupong Pasupat, and Mehran Kazemi. 
In-context learning with retrieved demonstrations for language models: A survey. Transactions on Machine Learning Research, 2024d. ISSN 2835-8856. URL https://openreview.net/forum?id=NQPo8ZhQPa. Survey Certification." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 380, + 541, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 380, + 541, + 465 + ], + "spans": [ + { + "bbox": [ + 70, + 380, + 541, + 465 + ], + "type": "text", + "content": "Qing Lyu, Shreya Havaldar, Adam Stein, Li Zhang, Delip Rao, Eric Wong, Marianna Apidianaki, and Chris Callison-Burch. Faithful chain-of-thought reasoning. In Jong C. Park, Yuki Arase, Baotian Hu, Wei Lu, Derry Wijaya, Ayu Purwarianti, and Adila Alfa Krisnadhi (eds.), Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 305-329, Nusa Dua, Bali, November 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.ijcnlp-main.20. URL https://aclanthology.org/2023.ijcnlp-main.20/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 474, + 541, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 474, + 541, + 545 + ], + "spans": [ + { + "bbox": [ + 70, + 474, + 541, + 545 + ], + "type": "text", + "content": "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, and Aixin Sun. Sciagent: Tool-augmented language models for scientific reasoning. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 15701-15736. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.880." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 555, + 541, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 555, + 541, + 593 + ], + "spans": [ + { + "bbox": [ + 70, + 555, + 541, + 593 + ], + "type": "text", + "content": "Yubo Ma, Zhibin Gou, Junheng Hao, Ruochen Xu, Shuohang Wang, Liangming Pan, Yujiu Yang, Yixin Cao, Aixin Sun, Hany Awadalla, et al. Sciagent: Tool-augmented language models for scientific reasoning. arXiv preprint arXiv:2402.11451, 2024b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 602, + 541, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 602, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 70, + 602, + 541, + 662 + ], + "type": "text", + "content": "Aman Madaan, Katherine Hermann, and Amir Yazdanbakhsh. What makes chain-of-thought prompting effective? a counterfactual study. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1448-1535, Singapore, December 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.101. URL https://aclanthology.org/2023-findings-emnlp.101/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "type": "text", + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Thirty-seventh Conference on Neural Information Processing Systems, 2023b. URL https://openreview.net/forum?id=S37h0erQLB." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. Self-refine: Iterative refinement with self-feedback. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 161 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 161 + ], + "type": "text", + "content": "Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. Generative reward models. arXiv preprint arXiv:2410.12832, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 169, + 541, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 169, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 70, + 169, + 541, + 194 + ], + "type": "text", + "content": "XTX Markets. AIMO Progress Prize: July 2024 results. https://aimoprize.com/updates/2024-07-20-progress-prize-results, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 201, + 541, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 201, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 201, + 541, + 236 + ], + "type": "text", + "content": "Tula Masterman, Sandi Besen, Mason Sawtell, and Alex Chao. The landscape of emerging ai agent architectures for reasoning, planning, and tool calling: A survey, 2024. URL https://arxiv.org/abs/2404.11584." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 70, + 243, + 541, + 281 + ], + "type": "text", + "content": "Marco Matta, Gian Carlo Cardarilli, Luca Di Nunzio, Rocco Fazzolari, Daniele Giardino, M Re, F Silvestri, and S Spanò. Q-rts: a real-time swarm intelligence based on multi-agent q-learning. _Electronics Letters_, 55(10):589–591, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 287, + 541, + 312 + ], + "type": "text", + "content": "Nat McAleese, Rai Michael Pokorny, Juan Felipe Ceron Uribe, Evgenia Nitishinskaya, Maja Trebacz, and Jan Leike. Llm critics help catch llm bugs. arXiv preprint arXiv:2407.00215, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 319, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 319, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 70, + 319, + 541, + 392 + ], + "type": "text", + "content": "Raja Sekhar Reddy Mekala, Yasaman Razeghi, and Sameer Singh. EchoPrompt: Instructing the model to rephrase queries for improved in-context learning. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 399-432, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-short.35. URL https://aclanthology.org/2024.naacl-short.35." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 399, + 541, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 399, + 541, + 424 + ], + "spans": [ + { + "bbox": [ + 70, + 399, + 541, + 424 + ], + "type": "text", + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. Simpo: Simple preference optimization with a reference-free reward. arXiv preprint arXiv:2405.14734, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 430, + 541, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 430, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 70, + 430, + 541, + 468 + ], + "type": "text", + "content": "William Merrill and Ashish Sabharwal. The expressive power of transformers with chain of thought. In *The Twelfth International Conference on Learning Representations*, 2024. URL https://openreview.net/forum?id=NjNGLPh8Wh." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 474, + 541, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 474, + 541, + 511 + ], + "spans": [ + { + "bbox": [ + 70, + 474, + 541, + 511 + ], + "type": "text", + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 517, + 541, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 517, + 541, + 566 + ], + "spans": [ + { + "bbox": [ + 70, + 517, + 541, + 566 + ], + "type": "text", + "content": "Yifei Ming, Senthil Purushwalkam, Shrey Pandit, Zixuan Ke, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. Faitheval: Can your language model stay faithful to context, even if \"the moon is made of marshmallows\". In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=UeVx6L59fg." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 572, + 541, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 572, + 541, + 622 + ], + "spans": [ + { + "bbox": [ + 70, + 572, + 541, + 622 + ], + "type": "text", + "content": "Seyed Iman Mirzadeh, Keivan Alizadeh, Hooman Shahrokhi, Oncel Tuzel, Samy Bengio, and Mehrdad Farajtabar. GSM-symbolic: Understanding the limitations of mathematical reasoning in large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=AjXkRZIvjb." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 628, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 628, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 70, + 628, + 541, + 689 + ], + "type": "text", + "content": "Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. Cross-task generalization via natural language crowdsourcing instructions. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 3470-3487, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.244. URL https://aclanthology.org/2022.acl-long.244/." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 733 + ], + "type": "text", + "content": "Philipp Mondorf and Barbara Plank. Beyond accuracy: Evaluating the reasoning behavior of large language models - a survey. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=Lmjgl2n11u." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Nieves Montes, Michael Luck, Nardine Osman, Odinaldo Rodrigues, and Carles Sierra. Combining theory of mind and abductive reasoning in agent-oriented programming. Autonomous Agents and Multi-Agent Systems, 37(2):36, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 124, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 124, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 70, + 124, + 541, + 152 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettle-moyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 156, + 541, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 156, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 156, + 541, + 217 + ], + "type": "text", + "content": "Md Mahadi Hasan Nahid and Davood Rafiei. NormTab: Improving symbolic reasoning in LLMs through tabular data normalization. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 3569-3585, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.203. URL https://aclanthology.org/2024 findings-emnlp.203/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 223, + 541, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 223, + 541, + 250 + ], + "spans": [ + { + "bbox": [ + 70, + 223, + 541, + 250 + ], + "type": "text", + "content": "Allen Newell, John C Shaw, and Herbert A Simon. Report on a general problem solving program. In IFIP congress, volume 256, pp. 64. Pittsburgh, PA, 1959." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 255, + 541, + 280 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 255, + 541, + 280 + ], + "spans": [ + { + "bbox": [ + 70, + 255, + 541, + 280 + ], + "type": "text", + "content": "Allen Newell, Herbert Alexander Simon, et al. Human problem solving, volume 104. Prentice-hall Englewood Cliffs, NJ, 1972." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 285, + 541, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 285, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 69, + 285, + 541, + 312 + ], + "type": "text", + "content": "Khanh Nguyen, Hal Daumé III, and Jordan Boyd-Graber. Reinforcement learning for bandit neural machine translation with simulated human feedback. arXiv preprint arXiv:1707.07402, 2017." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 316, + 541, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 316, + 541, + 354 + ], + "spans": [ + { + "bbox": [ + 70, + 316, + 541, + 354 + ], + "type": "text", + "content": "Ansong Ni, Miltiadis Allamanis, Arman Cohan, Yinlin Deng, Kensen Shi, Charles Sutton, and Pengcheng Yin. Next: Teaching large language models to reason about code execution. In ICML, 2024. URL https://openreview.net/forum?id=B1W712hMBi." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 360, + 541, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 360, + 541, + 386 + ], + "spans": [ + { + "bbox": [ + 70, + 360, + 541, + 386 + ], + "type": "text", + "content": "Tobias Nipkow, Markus Wenzel, and Lawrence C Paulson. Isabelle/HOL: a proof assistant for higher-order logic. 2002." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 392, + 541, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 392, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 70, + 392, + 541, + 417 + ], + "type": "text", + "content": "NovaSky Team. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025. Accessed: 2025-01-09." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 422, + 541, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 541, + 472 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 541, + 472 + ], + "type": "text", + "content": "Maxwell Nye, Anders Andreassen, Guy Gur-Ari, Henryk Witold Michalewski, Jacob Austin, David Bieber, David Martin Dohan, Aitor Lewkowycz, Maarten Paul Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2021. https://arxiv.org/abs/2112.00114." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 477, + 541, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 477, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 70, + 477, + 541, + 515 + ], + "type": "text", + "content": "Catherine Olsson, Nelson Elhage, Neel Nanda, Nicholas Joseph, Nova DasSarma, Tom Henighan, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, et al. In-context learning and induction heads. arXiv preprint arXiv:2209.11895, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 521, + 443, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 521, + 443, + 535 + ], + "spans": [ + { + "bbox": [ + 69, + 521, + 443, + 535 + ], + "type": "text", + "content": "OpenAI. Introducing gpt-4.5. https://openai.com/index/introducing-gpt-4-5/, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 540, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 540, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 540, + 541, + 733 + ], + "type": "text", + "content": "OpenAI, :, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, Alex Iftimie, Alex Karpenko, Alex Tachard Passos, Alexander Neitz, Alexander Prokofiev, Alexander Wei, Allison Tam, Ally Bennett, Ananya Kumar, Andre Saraiva, Andrea Vallone, Andrew Duberstein, Andrew Kondrich, Andrey Mishchenko, Andy Applebaum, Angela Jiang, Ashvin Nair, Barret Zoph, Behrooz Ghorbani, Ben Rossen, Benjamin Sokolowsky, Boaz Barak, Bob McGrew, Borys Minaiev, Botao Hao, Bowen Baker, Brandon Houghton, Brandon McKinzie, Brydon Eastman, Camillo Lugaresi, Cary Bassin, Cary Hudson, Chak Ming Li, Charles de Bourcy, Chelsea Voss, Chen Shen, Chong Zhang, Chris Koch, Chris Orsinger, Christopher Hesse, Claudia Fischer, Clive Chan, Dan Roberts, Daniel Kappler, Daniel Levy, Daniel Selsam, David Dohan, David Farhi, 
David Mely, David Robinson, Dimitris Tsipras, Doug Li, Dragos Oprica, Eben Freeman, Eddie Zhang, Edmund Wong, Elizabeth Proehl, Enoch Cheung, Eric Mitchell, Eric Wallace, Erik Ritter, Evan Mays, Fan Wang, Felipe Petroski Such, Filippo Raso, Florencia Leoni, Foivos Tsimpourlas, Francis Song, Fred von Lohmann, Freddie Sulit, Geoff Salmon, Giambattista Parascandolo, Gildas Chabot, Grace Zhao, Greg Brockman, Guillaume Leclerc, Hadi Salman, Haiming Bao, Hao Sheng, Hart Andrin, Hessam Bagherinezhad, Hongyu Ren, Hunter Lightman, Hyung Won Chung, Ian Kivlichan, Ian O'Connell, Ian Osband, Ignasi Clavera Gilaberte, Ilge Akkaya, Ilya Kostrikov, Ilya Sutskever, Irina Kofman, Jakub Pachocki, James Lennon, Jason" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 81, + 541, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 81, + 541, + 357 + ], + "spans": [ + { + "bbox": [ + 79, + 81, + 541, + 357 + ], + "type": "text", + "content": "Wei, Jean Harb, Jerry Twore, Jiacheng Feng, Jiahui Yu, Jiayi Weng, Jie Tang, Jieqi Yu, Joaquin Quñonero Candela, Joe Palermo, Joel Parish, Johannes Heidecke, John Hallman, John Rizzo, Jonathan Gordon, Jonathan Uesato, Jonathan Ward, Joost Huizinga, Julie Wang, Kai Chen, Kai Xiao, Karan Singhal, Karina Nguyen, Karl Cobbe, Katy Shi, Kayla Wood, Kendra Rimbach, Keren Gu-Lemberg, Kevin Liu, Kevin Lu, Kevin Stone, Kevin Yu, Lama Ahmad, Lauren Yang, Leo Liu, Leon Maksin, Leyton Ho, Liam Fedus, Lilian Weng, Linden Li, Lindsay McCallum, Lindsey Held, Lorenz Kuhn, Lukas Kondraciuk, Lukasz Kaiser, Luke 
Metz, Madelaine Boyd, Maja Trebacz, Manas Joglekar, Mark Chen, Marko Tintor, Mason Meyer, Matt Jones, Matt Kaufer, Max Schwarzer, Meghan Shah, Mehmet Yatbaz, Melody Y. Guan, Mengyuan Xu, Mengyuan Yan, Mia Glaese, Mianna Chen, Michael Lampe, Michael Malek, Michele Wang, Michelle Fradin, Mike McClay, Mikhail Pavlov, Miles Wang, Mingxuan Wang, Mira Murati, Mo Bavarian, Mostafa Rohaninejad, Nat McAleese, Neil Chowdhury, Neil Chowdhury, Nick Ryder, Nikolas Tezak, Noam Brown, Ofir Nachum, Oleg Boiko, Oleg Murk, Olivia Watkins, Patrick Chao, Paul Ashbourne, Pavel Izmailov, Peter Zhokhov, Rachel Dias, Rahul Arora, Randall Lin, Rapha Gontijo Lopes, Raz Gaon, Reah Miyara, Reimar Leike, Renny Hwang, Rhythm Garg, Robin Brown, Roshan James, Rui Shu, Ryan Cheu, Ryan Greene, Saachi Jain, Sam Altman, Sam Toizer, Sam Toyer, Samuel Miserendino, Sandhini Agarwal, Santiago Hernandez, Sasha Baker, Scott McKinney, Scottie Yan, Shengjia Zhao, Shengli Hu, Shibani Santurkar, Shraman Ray Chaudhuri, Shuyuan Zhang, Siyuan Fu, Spencer Papay, Steph Lin, Suchir Balaji, Suvansh Sanjeev, Szymon Sidor, Tal Broda, Aidan Clark, Tao Wang, Taylor Gordon, Ted Sanders, Tejal Patwardhan, Thibault Sottiaux, Thomas Degry, Thomas Dimson, Tianhao Zheng, Timur Garipov, Tom Stasi, Trapit Bansal, Trevor Creech, Troy Peterson, Tyna Eloundou, Valerie Qi, Vineet Kosaraju, Vinnie Monaco, Vitchyr Pong, Vlad Fomenko, Weiyi Zheng, Wenda Zhou, Wes McCabe, Wojciech Zaremba, Yann Dubois, Yinghai Lu, Yining Chen, Young Cha, Yu Bai, Yuchen He, Yuchen Zhang, Yunyun Wang, Zheng Shao and Zhuohan Li. Openai o1 system card 2024. URL https://arxiv.org/abs/2412.16720." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 365, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 70, + 365, + 541, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 365, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 70, + 365, + 541, + 426 + ], + "type": "text", + "content": "OpenAI, :, Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, Jerry Tworek, Lorenz Kuhn, Lukasz Kaiser, Mark Chen, Max Schwarzer, Mostafa Rohaninejad, Nat McAleese, o3 contributors, Oleg Mürk, Rhythm Garg, Rui Shu, Szymon Sidor, Vineet Kosaraju, and Wenda Zhou. Competitive programming with large reasoning models, 2025. URL https://arxiv.org/abs/2502.06807." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 434, + 541, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 434, + 541, + 493 + ], + "spans": [ + { + "bbox": [ + 70, + 434, + 541, + 493 + ], + "type": "text", + "content": "Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. Training language models to follow instructions with human feedback, 2022. URL https://arxiv.org/abs/2203.02155." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 502, + 541, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 502, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 502, + 541, + 540 + ], + "type": "text", + "content": "Bo Pan, Jiaying Lu, Ke Wang, Li Zheng, Zhen Wen, Yingchaojie Feng, Minfeng Zhu, and Wei Chen. Agent-coord: Visually exploring coordination strategy for llm-based multi-agent collaboration. 
arXiv preprint arXiv:2404.11943, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 546, + 541, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 546, + 541, + 608 + ], + "spans": [ + { + "bbox": [ + 70, + 546, + 541, + 608 + ], + "type": "text", + "content": "Liangming Pan, Alon Albalak, Xinyi Wang, and William Wang. Logic-LM: Empowering large language models with symbolic solvers for faithful logical reasoning. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 3806-3824, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.248. URL https://aclanthology.org/2023-findings-emnlp.248/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 615, + 541, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 615, + 541, + 664 + ], + "spans": [ + { + "bbox": [ + 70, + 615, + 541, + 664 + ], + "type": "text", + "content": "Liangming Pan, Michael Saxon, Wenda Xu, Deepak Nathani, Xinyi Wang, and William Yang Wang. Automatically correcting large language models: Surveying the landscape of diverse automated correction strategies. Transactions of the Association for Computational Linguistics, 12:484-506, 2024b. doi: 10.1162/tacl_a_00660. URL https://aclanthology.org/2024.tacl-1.27/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 672, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 672, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 672, + 541, + 733 + ], + "type": "text", + "content": "Bhargavi Paranjape, Julian Michael, Marjan Ghazvininejad, Hannaneh Hajishirzi, and Luke Zettlemoyer. Prompting contrastive explanations for commonsense reasoning tasks. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (eds.), Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pp. 
4179-4192, Online, August 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021-findings-acl.366. URL https://aclanthology.org/2021-findings-acl.366/." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "59" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 58 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "text", + "content": "Remo Pareschi. Abductive reasoning with the gpt-4 language model: Case studies from criminal investigation, medical practice, scientific research. _Sistema intelligenti_, 35(2):435-444, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 115, + 311, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 115, + 311, + 129 + ], + "spans": [ + { + "bbox": [ + 70, + 115, + 311, + 129 + ], + "type": "text", + "content": "John Arthur Passmore. Philosophical reasoning. 1961." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 137, + 541, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 137, + 541, + 174 + ], + "spans": [ + { + "bbox": [ + 70, + 137, + 541, + 174 + ], + "type": "text", + "content": "Pouya Pezeshkpour, Eser Kandogan, Nikita Bhutani, Sajjadur Rahman, Tom Mitchell, and Estevam Hruschka. Reasoning capacity in multi-agent systems: Limitations, challenges and human-centered solutions, 2024. 
URL https://arxiv.org/abs/2402.01108." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 182, + 541, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 182, + 541, + 219 + ], + "spans": [ + { + "bbox": [ + 70, + 182, + 541, + 219 + ], + "type": "text", + "content": "Gabriel Poesia, Kanishk Gandhi, Eric Zelikman, and Noah Goodman. Certified deductive reasoning with language models. Transactions on Machine Learning Research, 2024. ISSN 2835-8856. URL https://openreview.net/forum?id=yXnwrs2T16." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 227, + 541, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 227, + 541, + 264 + ], + "spans": [ + { + "bbox": [ + 70, + 227, + 541, + 264 + ], + "type": "text", + "content": "Mohammadreza Pourreza and Davood Rafiei. DIN-SQL: Decomposed in-context learning of text-to-SQL with self-correction. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=p53QDxSIc5." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 273, + 541, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 273, + 541, + 345 + ], + "spans": [ + { + "bbox": [ + 70, + 273, + 541, + 345 + ], + "type": "text", + "content": "Ben Prystawski, Michael Li, and Noah D. Goodman. Why think step by step? reasoning emerges from the locality of experience. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/e0af79ad53a336b4c4b4f7e2a68eb609-Abstract-Conference.html." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 354, + 541, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 354, + 541, + 415 + ], + "spans": [ + { + "bbox": [ + 70, + 354, + 541, + 415 + ], + "type": "text", + "content": "Reid Pryzant, Dan Iter, Jerry Li, Yin Lee, Chenguang Zhu, and Michael Zeng. Automatic prompt optimization with \"gradient descent\" and beam search. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 7957-7968, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.494. URL https://aclanthology.org/2023.emnlp-main.494/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "type": "text", + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "type": "text", + "content": "Zhenting Qi, Hongyin Luo, Xuliang Huang, Zhuokai Zhao, Yibo Jiang, Xiangjun Fan, Himabindu Lakkaraju, and James Glass. Quantifying generalization complexity for large language models, 2024. URL https://arxiv.org/abs/2410.01769." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 514, + 541, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 514, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 69, + 514, + 541, + 540 + ], + "type": "text", + "content": "Shuofei Qiao, Honghao Gui, Chengfei Lv, Qianghuai Jia, Huajun Chen, and Ningyu Zhang. Making language models better tool learners with execution feedback. arXiv preprint arXiv:2305.13068, 2023a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 547, + 541, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 547, + 541, + 597 + ], + "spans": [ + { + "bbox": [ + 70, + 547, + 541, + 597 + ], + "type": "text", + "content": "Shuofei Qiao, Yixin Ou, Ningyu Zhang, Xiang Chen, Yunzhi Yao, Shumin Deng, Chuanqi Tan, Fei Huang, and Huajun Chen. Reasoning with language model prompting: A survey. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5368-5393, Toronto, Canada, July 2023b. URL https://aclanthology.org/2023.acl-long.294/." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 604, + 541, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 604, + 541, + 642 + ], + "spans": [ + { + "bbox": [ + 70, + 604, + 541, + 642 + ], + "type": "text", + "content": "Chengwei Qin, Wenhan Xia, Tan Wang, Fangkai Jiao, Yuchen Hu, Bosheng Ding, Ruirui Chen, and Shafiq Joty. Relevant or random: Can llms truly perform analogical reasoning? ACL-Findings, 2025. URL https://arxiv.org/abs/2404.12728." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 650, + 541, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 650, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 70, + 650, + 541, + 687 + ], + "type": "text", + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 695, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 695, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 69, + 695, + 541, + 732 + ], + "type": "text", + "content": "Xihe Qiu, Haoyu Wang, Xiaoyu Tan, Chao Qu, Yujie Xiong, Yuan Cheng, Yinghui Xu, Wei Chu, and Yuan Qi. Towards collaborative intelligence: Propagating intentions and reasoning for multi-agent coordination with large language models, 2024. URL https://arxiv.org/abs/2407.12532." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "60" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 59 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=DRC9pZwBwR." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 152 + ], + "type": "text", + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve. arXiv preprint arXiv:2407.18219, 2024b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 156, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 156, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 156, + 541, + 232 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D. Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-AAbstract-Conference.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 236, + 541, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 236, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 70, + 236, + 541, + 262 + ], + "type": "text", + "content": "Nazneen Fatema Rajani, Bryan McCann, Caiming Xiong, and Richard Socher. Explain yourself! leveraging language models for commonsense reasoning. arXiv preprint arXiv:1906.02361, 2019." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 267, + 541, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 267, + 541, + 305 + ], + "spans": [ + { + "bbox": [ + 70, + 267, + 541, + 305 + ], + "type": "text", + "content": "Shyam Sundhar Ramesh, Yifan Hu, Iason Chaimalas, Viraj Mehta, Pier Giuseppe Sessa, Haitham Bou Ammar, and Ilija Bogunovic. Group robust preference optimization in reward-free rlhf. arXiv preprint arXiv:2405.20304, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 311, + 541, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 311, + 541, + 349 + ], + "spans": [ + { + "bbox": [ + 70, + 311, + 541, + 349 + ], + "type": "text", + "content": "Jingqing Ruan, Yali Du, Xuantang Xiong, Dengpeng Xing, Xiyun Li, Linghui Meng, Haifeng Zhang, Jun Wang, and Bo Xu. Gcs: Graph-based coordination strategy for multi-agent reinforcement learning. arXiv preprint arXiv:2201.06257, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 354, + 541, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 354, + 541, + 428 + ], + "spans": [ + { + "bbox": [ + 70, + 354, + 541, + 428 + ], + "type": "text", + "content": "Ohad Rubin, Jonathan Herzig, and Jonathan Berant. Learning to retrieve prompts for in-context learning. In Marine Carpuat, Marie-Catherine de Marneffé, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655-2671, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.191. URL https://aclanthology.org/2022.naacl-main.191/." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 434, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 434, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 434, + 541, + 449 + ], + "type": "text", + "content": "Stuart Russell and Peter Norvig. Artificial Intelligence: A Modern Approach. Prentice Hall, 3 edition, 2010." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 454, + 541, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 454, + 541, + 491 + ], + "spans": [ + { + "bbox": [ + 70, + 454, + 541, + 491 + ], + "type": "text", + "content": "Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests. arXiv preprint arXiv:2412.13091, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 497, + 541, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 497, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 69, + 497, + 541, + 523 + ], + "type": "text", + "content": "Amir Saeidi, Shivanshu Verma, Aswin RRV, and Chitta Baral. Triple preference optimization: Achieving better alignment with less data in a single step optimization. arXiv preprint arXiv:2405.16681, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 529, + 541, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 529, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 70, + 529, + 541, + 627 + ], + "type": "text", + "content": "Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M Rush. Multitask prompted training enables zero-shot task generalization. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=9Vrb9D0WI4." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 632, + 541, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 632, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 70, + 632, + 541, + 670 + ], + "type": "text", + "content": "Abulhair Saparov and He He. Language models are greedy reasoners: A systematic formal analysis of chain-of-thought. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=qFVVBzXxR2V." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 675, + 541, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 541, + 702 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 541, + 702 + ], + "type": "text", + "content": "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. 
arXiv preprint arXiv:2206.05802, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 69, + 707, + 541, + 734 + ], + "type": "text", + "content": "Erik Schluntz and Barry Zhang. Building effective agents. https://www.anthropic.com/, Dec 2024. URL https://www.anthropic.com/research/building-effective-agents." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "61" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 60 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 108 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 114, + 541, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 541, + 163 + ], + "type": "text", + "content": "Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. Quantifying language models' sensitivity to spurious features in prompt design or: How i learned to start worrying about prompt formatting. In The Twelfth International Conference on Learning Representations, 2024. 
URL https://openreview.net/forum?id=RIu51yNXjT." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 541, + 232 + ], + "type": "text", + "content": "S Seals and Valerie Shalin. Evaluating the deductive competence of large language models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 8614-8630, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.476. URL https://aclanthology.org/2024.naacl-long.476/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 239, + 541, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 239, + 541, + 264 + ], + "spans": [ + { + "bbox": [ + 70, + 239, + 541, + 264 + ], + "type": "text", + "content": "H Seo and D Lee. Reinforcement learning and strategic reasoning during social decision-making. In Decision Neuroscience, pp. 225-231. Elsevier, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 271, + 541, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 271, + 541, + 332 + ], + "spans": [ + { + "bbox": [ + 70, + 271, + 541, + 332 + ], + "type": "text", + "content": "Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shahriari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. BOND: aligning llms with best-of-n distillation. CoRR, abs/2407.14622, 2024. URL https://doi.org/10.48550/arXiv.2407.14622." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 339, + 541, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 339, + 541, + 376 + ], + "spans": [ + { + "bbox": [ + 70, + 339, + 541, + 376 + ], + "type": "text", + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning. arXiv preprint arXiv:2410.08146, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 384, + 541, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 384, + 541, + 433 + ], + "spans": [ + { + "bbox": [ + 70, + 384, + 541, + 433 + ], + "type": "text", + "content": "Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for LLM reasoning. CoRR, abs/2410.08146, 2024b. doi: 10.48550/ARXIV.2410.08146. URL https://doi.org/10.48550/arXiv.2410.08146." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 441, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 541, + 466 + ], + "type": "text", + "content": "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role play with large language models. Nature, 623 (7987):493-498, 2023a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 472, + 541, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 472, + 541, + 498 + ], + "spans": [ + { + "bbox": [ + 69, + 472, + 541, + 498 + ], + "type": "text", + "content": "Murray Shanahan, Kyle McDonell, and Laria Reynolds. Role-play with large language models, 2023b. URL https://arxiv.org/abs/2305.16367." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 506, + 541, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 506, + 541, + 543 + ], + "spans": [ + { + "bbox": [ + 70, + 506, + 541, + 543 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 550, + 541, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 550, + 541, + 587 + ], + "spans": [ + { + "bbox": [ + 70, + 550, + 541, + 587 + ], + "type": "text", + "content": "Zhengliang Shi, Weiwei Sun, Shen Gao, Pengjie Ren, Zhumin Chen, and Zhaochun Ren. Generate-then-ground in retrieval-augmented generation for multi-hop question answering. arXiv preprint arXiv:2406.14891, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 594, + 541, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 594, + 541, + 631 + ], + "spans": [ + { + "bbox": [ + 70, + 594, + 541, + 631 + ], + "type": "text", + "content": "Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 638, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 638, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 70, + 638, + 541, + 689 + ], + "type": "text", + "content": "Kumar Shridhar, Koustuv Sinha, Andrew Cohen, Tianlu Wang, Ping Yu, Ramakanth Pasunuru, Mrinmaya Sachan, Jason Weston, and Asli Celikyilmaz. The art of llm refinement: Ask, refine, and trust. 
In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5872-5883, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "text", + "content": "Chenglei Si, Zhe Gan, Zhengyuan Yang, Shuohang Wang, Jianfeng Wang, Jordan Lee Boyd-Graber, and Lijuan Wang. Prompting GPT-3 to be reliable. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=98p5x51L5af." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "62" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 61 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling LLM test-time compute optimally can be more effective than scaling model parameters. CoRR, abs/2408.03314, 2024. doi: 10.48550/ARXIV.2408.03314. URL https://doi.org/10.48550/arXiv.2408.03314." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 127, + 541, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 164 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 164 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling test-time compute optimally can be more effective than scaling LLM parameters. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=4FWAwZtd2n." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 172, + 541, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 172, + 541, + 211 + ], + "spans": [ + { + "bbox": [ + 70, + 172, + 541, + 211 + ], + "type": "text", + "content": "Yifan Song, Weimin Xiong, Xiutian Zhao, Dawei Zhu, Wenhao Wu, Ke Wang, Cheng Li, Wei Peng, and Sujian Li. Agentbank: Towards generalized llm agents via fine-tuning on " + }, + { + "bbox": [ + 70, + 172, + 541, + 211 + ], + "type": "inline_equation", + "content": "50000+" + }, + { + "bbox": [ + 70, + 172, + 541, + 211 + ], + "type": "text", + "content": " interaction trajectories. arXiv preprint arXiv:2410.07706, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 218, + 541, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 218, + 541, + 267 + ], + "spans": [ + { + "bbox": [ + 70, + 218, + 541, + 267 + ], + "type": "text", + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. arXiv preprint arXiv:2409.12183, 2024a. URL https://arxiv.org/pdf/2409.12183." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 276, + 541, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 276, + 541, + 314 + ], + "spans": [ + { + "bbox": [ + 70, + 276, + 541, + 314 + ], + "type": "text", + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning, 2024b. URL https://arxiv.org/abs/2409.12183." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 321, + 541, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 321, + 541, + 347 + ], + "spans": [ + { + "bbox": [ + 69, + 321, + 541, + 347 + ], + "type": "text", + "content": "Keith E Stanovich and Richard F West. Individual differences in reasoning: Implications for the rationality debate? Behavioral and Brain Sciences, 23(5):645-665, 2000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 355, + 541, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 355, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 70, + 355, + 541, + 381 + ], + "type": "text", + "content": "Kaya Stechly, Matthew Marquez, and Subbarao Kambhampati. Gpt-4 doesn't know it's wrong: An analysis of iterative prompting for reasoning problems. arXiv preprint arXiv:2310.12397, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 388, + 541, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 388, + 541, + 415 + ], + "spans": [ + { + "bbox": [ + 69, + 388, + 541, + 415 + ], + "type": "text", + "content": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. On the self-verification limitations of large language models on reasoning and planning tasks. arXiv preprint arXiv:2402.08115, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 422, + 541, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 422, + 541, + 471 + ], + "spans": [ + { + "bbox": [ + 70, + 422, + 541, + 471 + ], + "type": "text", + "content": "Nisan Stiannon, Long Ouyang, Jeff Wu, Daniel M. Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul Christiano. Learning to summarize from human feedback. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20, Red Hook, NY, USA, 2020. Curran Associates Inc. ISBN 9781713829546." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 479, + 541, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 479, + 541, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 479, + 541, + 506 + ], + "type": "text", + "content": "Benedikt Stroebl, Sayash Kapoor, and Arvind Narayanan. Inference Scaling fLaws: The Limits of LLM Resampling with Imperfect Verifiers. arXiv preprint arXiv:2411.17501, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "spans": [ + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "type": "text", + "content": "Vighnesh Subramaniam, Yilun Du, Joshua B Tenenbaum, Antonio Torralba, Shuang Li, and Igor Mordatch. Multiagent finetuning: Self improvement with diverse reasoning chains. arXiv preprint arXiv:2501.05707, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "spans": [ + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "type": "text", + "content": "Yuan Sui, Mengyu Zhou, Mingjie Zhou, Shi Han, and Dongmei Zhang. Table meets llm: Can large language models understand structured table data? a benchmark and empirical study. 
In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, WSDM '24, pp. 645-654, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 9798400703713. doi: 10.1145/3616855.3635752. URL https://doi.org/10.1145/3616855.3635752." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "type": "text", + "content": "Sainbayar Sukhbaatar, Rob Fergus, et al. Learning multiagent communication with backpropagation. Advances in neural information processing systems, 29, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 662, + 541, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 662, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 70, + 662, + 541, + 688 + ], + "type": "text", + "content": "Theodore R. Sumers, Shunyu Yao, Karthik Narasimhan, and Thomas L. Griffiths. Cognitive architectures for language agents, 2024. URL https://arxiv.org/abs/2309.02427." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 734 + ], + "type": "text", + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Heung-Yeung Shum, and Jian Guo. Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. arXiv preprint arXiv:2307.07697, 2023." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 62 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 155 + ], + "type": "text", + "content": "Jiaxing Sun, Weiquan Huang, Jiang Wu, Chenya Gu, Wei Li, Songyang Zhang, Hang Yan, and Conghui He. Benchmarking Chinese commonsense reasoning of LLMs: From Chinese-specifics to reasoning-memorization correlations. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 11205-11228, Bangkok, Thailand, August 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.604. URL https://aclanthology.org/2024.acl-long.604/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 162, + 541, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 162, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 70, + 162, + 541, + 187 + ], + "type": "text", + "content": "Shichao Sun, Junlong Li, Weizhe Yuan, Ruifeng Yuan, Wenjie Li, and Pengfei Liu. The critique of critique. arXiv preprint arXiv:2401.04518, 2024b." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 194, + 541, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 541, + 232 + ], + "type": "text", + "content": "Zhiqing Sun, Longhui Yu, Yikang Shen, Weiyang Liu, Yiming Yang, Sean Welleck, and Chuang Gan. Easy-to-hard generalization: Scalable alignment beyond human supervision. CoRR, abs/2403.09472, 2024c. doi: 10.48550/ARXIV.2403.09472. URL https://doi.org/10.48550/arXiv.2403.09472." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 238, + 446, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 446, + 251 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 446, + 251 + ], + "type": "text", + "content": "Richard S Sutton. Reinforcement learning: An introduction. A Bradford Book, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 258, + 541, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 258, + 541, + 283 + ], + "spans": [ + { + "bbox": [ + 70, + 258, + 541, + 283 + ], + "type": "text", + "content": "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding. arXiv preprint arXiv:2401.12954, 2024a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 291, + 541, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 291, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 70, + 291, + 541, + 316 + ], + "type": "text", + "content": "Mirac Suzgun and Adam Tauman Kalai. Meta-prompting: Enhancing language models with task-agnostic scaffolding, 2024b. URL https://arxiv.org/abs/2401.12954." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 323, + 541, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 323, + 541, + 360 + ], + "spans": [ + { + "bbox": [ + 70, + 323, + 541, + 360 + ], + "type": "text", + "content": "Sijun Tan, Siyuan Zhuang, Kyle Montgomery, William Y Tang, Alejandro Cuadron, Chenguang Wang, Raluca Ada Popa, and Ion Stoica. Judgebench: A benchmark for evaluating llm-based judges. arXiv preprint arXiv:2410.12784, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 367, + 541, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 367, + 541, + 416 + ], + "spans": [ + { + "bbox": [ + 70, + 367, + 541, + 416 + ], + "type": "text", + "content": "Zhengyang Tang, Xingxing Zhang, Benyou Wang, and Furu Wei. Mathscale: Scaling instruction tuning for mathematical reasoning. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=Kjww7ZN47M." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 70, + 423, + 541, + 460 + ], + "type": "text", + "content": "Zhengyang Tang, Ziniu Li, Zhenyang Xiao, Tian Ding, Ruoyu Sun, Benyou Wang, Dayiheng Liu, Fei Huang, Tianyu Liu, Bowen Yu, and Junyang Lin. Enabling scalable oversight via self-evolving critic, 2025. URL https://arxiv.org/abs/2501.05727." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 468, + 541, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 541, + 504 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 541, + 504 + ], + "type": "text", + "content": "Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 
Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 511, + 541, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 511, + 541, + 669 + ], + "spans": [ + { + "bbox": [ + 70, + 511, + 541, + 669 + ], + "type": "text", + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, Chuning Tang, Congcong Wang, Dehao Zhang, Enming Yuan, Enzhe Lu, Fengxiang Tang, Flood Sung, Guangda Wei, Guokun Lai, Haiqing Guo, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haotian Yao, Haotian Zhao, Haoyu Lu, Haoze Li, Haozhen Yu, Hongcheng Gao, Huabin Zheng, Huan Yuan, Jia Chen, Jianhang Guo, Jianlin Su, Jianzhou Wang, Jie Zhao, Jin Zhang, Jingyuan Liu, Junjie Yan, Junyan Wu, Lidong Shi, Ling Ye, Longhui Yu, Mengnan Dong, Neo Zhang, Ningchen Ma, Qiwei Pan, Qucheng Gong, Shaowei Liu, Shengling Ma, Shupeng Wei, Sihan Cao, Siying Huang, Tao Jiang, Weihao Gao, Weimin Xiong, Weiran He, Weixiao Huang, Wenhao Wu, Wenyang He, Xianghui Wei, Xianqing Jia, Xingzhe Wu, Xinran Xu, Xinxing Zu, Xinyu Zhou, Xuehai Pan, Y. Charles, Yang Li, Yangyang Hu, Yangyang Liu, Yanru Chen, Yejie Wang, Yibo Liu, Yidao Qin, Yifeng Liu, Ying Yang, Yiping Bao, Yulun Du, Yuxin Wu, Yuzhi Wang, Zaida Zhou, Zhaoji Wang, Zhaowei Li, Zhen Zhu, Zheng Zhang, Zhexu Wang, Zhilin Yang, Zhiqi Huang, Ziyao Xu, and Zonghan Yang. Kimi k1.5: Scaling reinforcement learning with llms, 2025. URL https://arxiv.org/abs/2501.12599." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 675, + 541, + 700 + ], + "type": "text", + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "type": "text", + "content": "Amitayush Thakur, George Tsoukalas, Yeming Wen, Jimmy Xin, and Swarat Chaudhuri. An in-context learning agent for formal theorem-proving. In Conference on Language Modeling (COLM), 2024." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "64" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 63 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 539, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 539, + 106 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 539, + 106 + ], + "type": "text", + "content": "The Coq Development Team. The Coq Proof Assistant. 2024. URL https://coq.inria.fr/doc/V8.20.0/refman/index.html. Version 8.20.0." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 112, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 112, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 112, + 541, + 175 + ], + "type": "text", + "content": "Qingyuan Tian, Hanlun Zhu, Lei Wang, Yang Li, and Yunshi Lan. 
" + }, + { + "bbox": [ + 70, + 112, + 541, + 175 + ], + "type": "inline_equation", + "content": "\\mathbf{R}^3" + }, + { + "bbox": [ + 70, + 112, + 541, + 175 + ], + "type": "text", + "content": " prompting: Review, rephrase and resolve for chain-of-thought reasoning in large language models under noisy context. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 1670-1685, Singapore, December 2023. Association for Computational Linguistics. doi: 10. 18653/v1/2023-findings-emnlp.114. URL https://aclanthology.org/2023-findings-emnlp.114/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 179, + 539, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 179, + 539, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 179, + 539, + 205 + ], + "type": "text", + "content": "Ye Tian, Baolin Peng, Linfeng Song, Lifeng Jin, Dian Yu, Haitao Mi, and Dong Yu. Toward self-improvement of llms via imagination, searching, and criticizing. arXiv preprint arXiv:2404.12253, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 211, + 539, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 211, + 539, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 211, + 539, + 247 + ], + "type": "text", + "content": "Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. Dart-math: Difficulty-aware rejection tuning for mathematical problem-solving. CoRR, abs/2407.13690, 2024. doi: 10.48550/ARXIV.2407.13690. URL https://doi.org/10.48550/arXiv.2407.13690." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 254, + 539, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 254, + 539, + 291 + ], + "spans": [ + { + "bbox": [ + 70, + 254, + 539, + 291 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 297, + 539, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 297, + 539, + 332 + ], + "spans": [ + { + "bbox": [ + 70, + 297, + 539, + 332 + ], + "type": "text", + "content": "Vince Trencsenyi, Agnieszka Mensfelt, and Kostas Stathis. Approximating human strategic reasoning with llm-enhanced recursive reasoners leveraging multi-agent hypergames. arXiv preprint arXiv:2502.07443, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 339, + 539, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 339, + 539, + 365 + ], + "spans": [ + { + "bbox": [ + 70, + 339, + 539, + 365 + ], + "type": "text", + "content": "Trieu H Trinh, Yuhuai Wu, Quoc V Le, He He, and Thang Luong. Solving olympiad geometry without human demonstrations. Nature, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 370, + 539, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 370, + 539, + 407 + ], + "spans": [ + { + "bbox": [ + 70, + 370, + 539, + 407 + ], + "type": "text", + "content": "Prapti Trivedi, Aditya Gulati, Oliver Molenschot, Meghana Arakkal Rajeev, Rajkumar Ramamurthy, Keith Stevens, Tanveesh Singh Chaudhery, Jahnavi Jambholkar, James Zou, and Nazneen Rajani. Self-rationalization improves llm as a fine-grained judge. arXiv preprint arXiv:2410.05495, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 414, + 539, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 414, + 539, + 451 + ], + "spans": [ + { + "bbox": [ + 70, + 414, + 539, + 451 + ], + "type": "text", + "content": "Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process-and outcome-based feedback. arXiv preprint arXiv:2211.14275, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 456, + 539, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 456, + 539, + 482 + ], + "spans": [ + { + "bbox": [ + 69, + 456, + 539, + 482 + ], + "type": "text", + "content": "Karthik Valmeekam, Matthew Marquez, and Subbarao Kambhampati. Can large language models really improve by self-critiquing their own plans? arXiv preprint arXiv:2310.08118, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 487, + 539, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 487, + 539, + 525 + ], + "spans": [ + { + "bbox": [ + 70, + 487, + 539, + 525 + ], + "type": "text", + "content": "Pat Verga, Sebastian Hofstatter, Sophia Althammer, Yixuan Su, Aleksandra Piktus, Arkady Arkhangorodsky, Minjie Xu, Naomi White, and Patrick Lewis. Replacing judges with juries: Evaluating llm generations with a panel of diverse models. arXiv preprint arXiv:2404.18796, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 531, + 539, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 531, + 539, + 567 + ], + "spans": [ + { + "bbox": [ + 70, + 531, + 539, + 567 + ], + "type": "text", + "content": "Johannes Von Oswald, Eyvind Niklasson, Ettore Randazzo, Joao Sacramento, Alexander Mordvintsev, Andrey Zhmoginov, and Max Vlademyrov. Transformers learn in-context by gradient descent. 
In International Conference on Machine Learning, pp. 35151-35174. PMLR, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 574, + 539, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 574, + 539, + 611 + ], + "spans": [ + { + "bbox": [ + 70, + 574, + 539, + 611 + ], + "type": "text", + "content": "Tu Vu, Kalpesh Krishna, Salaheddin Alzubi, Chris Tar, Manaal Faruqui, and Yun-Hsuan Sung. Foundational autorators: Taming large language models for better automatic evaluation. arXiv preprint arXiv:2407.10817, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 616, + 539, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 616, + 539, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 616, + 539, + 654 + ], + "type": "text", + "content": "Xingchen Wan, Ruoxi Sun, Hootan Nakhost, and Sercan O Arik. Teach better or show smarter? on instructions and exemplars in automatic prompt optimization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=IdtoJVWvNx." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 659, + 539, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 659, + 539, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 659, + 539, + 732 + ], + "type": "text", + "content": "Yuxuan Wan, Wenxuan Wang, Yiliu Yang, Youliang Yuan, Jen-tse Huang, Pinjia He, Wenxiang Jiao, and Michael Lyu. LogicAsker: Evaluating and improving the logical reasoning ability of large language models. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 2124-2155, Miami, Florida, USA, November 2024b. Association for Computational Linguistics. doi: 10.18653/v1/2024.emnlp-main.128. URL https://aclanthology.org/2024.emnlp-main.128/." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "65" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 64 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 131 + ], + "type": "text", + "content": "Ziyu Wan, Xidong Feng, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training. In _Forty-first International Conference on Machine Learning_, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024c. URL https://openreview.net/forum?id=C4OpREezgj." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 138, + 541, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 138, + 541, + 212 + ], + "spans": [ + { + "bbox": [ + 70, + 138, + 541, + 212 + ], + "type": "text", + "content": "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. Towards understanding chain-of-thought prompting: An empirical study of what matters. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2717-2739, Toronto, Canada, July 2023a. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.153. URL https://aclanthology.org/2023.acl-long.153/." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 219, + 541, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 219, + 541, + 245 + ], + "spans": [ + { + "bbox": [ + 70, + 219, + 541, + 245 + ], + "type": "text", + "content": "Han Wang, Archiki Prasad, Elias Stengel-Eskin, and Mohit Bansal. Soft self-consistency improves language model agents. arXiv preprint arXiv:2402.13212, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 252, + 541, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 252, + 541, + 291 + ], + "spans": [ + { + "bbox": [ + 70, + 252, + 541, + 291 + ], + "type": "text", + "content": "Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Yixuan Li, and Neel Joshi. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. In The Thirty-Eighth Annual Conference on Neural Information Processing Systems, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 298, + 541, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 298, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 70, + 298, + 541, + 323 + ], + "type": "text", + "content": "Junlin Wang, Jue Wang, Ben Athiwaratkun, Ce Zhang, and James Zou. Mixture-of-agents enhances large language model capabilities, 2024c. URL https://arxiv.org/abs/2406.04692." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 331, + 541, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 331, + 541, + 380 + ], + "spans": [ + { + "bbox": [ + 70, + 331, + 541, + 380 + ], + "type": "text", + "content": "Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, Wayne Xin Zhao, Zhewei Wei, and Jirong Wen. A survey on large language model based autonomous agents. Frontiers of Computer Science, 18(6), March 2024d. ISSN 2095-2236. 
doi: 10.1007/s11704-024-40231-1. URL http://dx.doi.org/10.1007/s11704-024-40231-1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 388, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 388, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 70, + 388, + 541, + 449 + ], + "type": "text", + "content": "Liang Wang, Nan Yang, and Furu Wei. Learning to retrieve in-context examples for large language models. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1752-1767, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.105/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 457, + 541, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 457, + 541, + 519 + ], + "spans": [ + { + "bbox": [ + 70, + 457, + 541, + 519 + ], + "type": "text", + "content": "Peifeng Wang, Zhengyang Wang, Zheng Li, Yifan Gao, Bing Yin, and Xiang Ren. SCOTT: Self-consistent chain-of-thought distillation. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5546-5558, Toronto, Canada, July 2023b. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.304. URL https://aclanthology.org/2023.acl-long.304/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 526, + 541, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 526, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 526, + 541, + 552 + ], + "type": "text", + "content": "Peifeng Wang, Austin Xu, Yilun Zhou, Caiming Xiong, and Shafiq Joty. Direct judgement preference optimization. arXiv preprint arXiv:2409.14664, 2024f." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "spans": [ + { + "bbox": [ + 70, + 559, + 541, + 620 + ], + "type": "text", + "content": "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9426-9439. Association for Computational Linguistics, 2024g. URL https://doi.org/10.18653/v1/2024.acl-long.510." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 69, + 628, + 541, + 654 + ], + "type": "text", + "content": "Qineng Wang, Zihao Wang, Ying Su, Hanghang Tong, and Yangqiu Song. Rethinking the bounds of llm reasoning: Are multi-agent discussions the key?, 2024h. URL https://arxiv.org/abs/2402.18272." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 662, + 541, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 662, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 541, + 688 + ], + "type": "text", + "content": "Song Wang, Zihan Chen, Chengshuai Shi, Cong Shen, and Jundong Li. Mixture of demonstrations for in-context learning. Advances in Neural Information Processing Systems, 37:88091-88116, 2024i." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 695, + 541, + 733 + ], + "type": "text", + "content": "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv preprint arXiv:2308.04592, 2023c." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "66" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 65 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Tianlu Wang, Ilia Kulikov, Olga Golovneva, Ping Yu, Weizhe Yuan, Jane Dwivedi-Yu, Richard Yuanzhe Pang, Maryam Fazel-Zarandi, Jason Weston, and Xian Li. Self-taught evaluators. arXiv preprint arXiv:2408.02666, 2024j." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 127, + 541, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 163 + ], + "type": "text", + "content": "Xinyi Wang, Lucas Caccia, Oleksiy Ostapenko, Xingdi Yuan, and Alessandro Sordoni. Guiding language model reasoning with planning tokens. CoRR, abs/2310.05707, 2023d. 
doi: 10.48550/ARXIV.2310.05707. URL https://doi.org/10.48550/arXiv.2310.05707." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 172, + 541, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 172, + 541, + 220 + ], + "spans": [ + { + "bbox": [ + 70, + 172, + 541, + 220 + ], + "type": "text", + "content": "Xinyi Wang, Wanrong Zhu, Michael Saxon, Mark Steyvers, and William Yang Wang. Large language models are latent variable models: Explaining and finding good demonstrations for in-context learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023e. URL https://openreview.net/forum?id=BGvkwZEGt7." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 70, + 230, + 541, + 255 + ], + "type": "text", + "content": "Xuezhi Wang and Denny Zhou. Chain-of-thought reasoning without prompting. arXiv preprint arXiv:2402.10200, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 263, + 541, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 263, + 541, + 311 + ], + "spans": [ + { + "bbox": [ + 70, + 263, + 541, + 311 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023f. URL https://openreview.net/forum?id=1PL1NIMMrw." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 320, + 541, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 320, + 541, + 357 + ], + "spans": [ + { + "bbox": [ + 70, + 320, + 541, + 357 + ], + "type": "text", + "content": "Yidong Wang, Zhuohao Yu, Wenjin Yao, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, et al. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. In The Twelfth International Conference on Learning Representations, 2023g." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 366, + 541, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 366, + 541, + 391 + ], + "spans": [ + { + "bbox": [ + 70, + 366, + 541, + 391 + ], + "type": "text", + "content": "Yuqing Wang and Yun Zhao. Rupbench: Benchmarking reasoning under perturbations for robustness evaluation in large language models. arXiv preprint arXiv:2406.11020, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 399, + 541, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 399, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 70, + 399, + 541, + 460 + ], + "type": "text", + "content": "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. In Findings of the Association for Computational Linguistics: EMNLP 2024, Miami, Florida, USA, November 12-16, 2024, pp. 7309-7319. Association for Computational Linguistics, 2024k. URL https://aclanthology.org/2024-findings-emnlp.429." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 541, + 506 + ], + "type": "text", + "content": "Zihan Wang, Yunxuan Li, Yuexin Wu, Liangchen Luo, Le Hou, Hongkun Yu, and Jingbo Shang. Multi-step problem solving through a verifier: An empirical analysis on model-induced process supervision. arXiv preprint arXiv:2402.02658, 2024l." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "spans": [ + { + "bbox": [ + 70, + 514, + 541, + 549 + ], + "type": "text", + "content": "Zihao Wang, Anji Liu, Haowei Lin, Jiaqi Li, Xiaojian Ma, and Yitao Liang. Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313, 2024m." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 559, + 541, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 559, + 541, + 608 + ], + "spans": [ + { + "bbox": [ + 70, + 559, + 541, + 608 + ], + "type": "text", + "content": "Zilong Wang, Hao Zhang, Chun-Liang Li, Julian Martin Eisenschlos, Vincent Perot, Zifeng Wang, Lesly Miculicich, Yasuhisa Fujii, Jingbo Shang, Chen-Yu Lee, and Tomas Pfister. Chain-of-table: Evolving tables in the reasoning chain for table understanding. In The Twelfth International Conference on Learning Representations, 2024n. URL https://openreview.net/forum?id=4L0xnS4GQM." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 616, + 541, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 616, + 541, + 642 + ], + "spans": [ + { + "bbox": [ + 70, + 616, + 541, + 642 + ], + "type": "text", + "content": "Peter Cathcart Wason and Philip Nicholas JohnsonLaird. 
Psychology of reasoning: Structure and content. Harvard University Press, 86, 1972." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 70, + 651, + 541, + 687 + ], + "type": "text", + "content": "Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682, 2022a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022b." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "67" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 66 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "text", + "content": "Yuxiang Wei, Zhe Wang, Jiawei Liu, Yifeng Ding, and Lingming Zhang. 
Magicoder: Empowering code generation with OSS-instruct. In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 52632-52657. PMLR, 21-27 Jul 2024. URL https://proceedings.mlr.press/v235/wei24h.html." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "spans": [ + { + "bbox": [ + 70, + 140, + 541, + 176 + ], + "type": "text", + "content": "Yuxiang Wei, Olivier Duchenne, Jade Copet, Quentin Carbonneaux, Lingming Zhang, Daniel Fried, Gabriel Synnaeve, Rishabh Singh, and Sida I. Wang. Swe-rl: Advancing llm reasoning via reinforcement learning on open software evolution, 2025. URL https://arxiv.org/abs/2502.18449." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 186, + 541, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 186, + 541, + 212 + ], + "spans": [ + { + "bbox": [ + 70, + 186, + 541, + 212 + ], + "type": "text", + "content": "Nathaniel Weir, Muhammad Khalifa, Linlu Qiu, Orion Weller, and Peter Clark. Learning to reason via program generation, emulation, and search. arXiv preprint arXiv:2405.16337, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 220, + 541, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 220, + 541, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 220, + 541, + 257 + ], + "type": "text", + "content": "Sean Welleck, Amanda Bertsch, Matthew Finlayson, Hailey Schoelkopf, Alex Xie, Graham Neubig, Ilia Kulikov, and Zaid Harchaoui. From decoding to meta-generation: Inference-time algorithms for large language models. arXiv preprint arXiv:2406.16838, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 266, + 541, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 266, + 541, + 292 + ], + "spans": [ + { + "bbox": [ + 70, + 266, + 541, + 292 + ], + "type": "text", + "content": "Ying Wen, Yaodong Yang, Rui Luo, Jun Wang, and Wei Pan. Probabilistic recursive reasoning for multi-agent reinforcement learning. arXiv preprint arXiv:1901.09207, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 301, + 541, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 301, + 541, + 325 + ], + "spans": [ + { + "bbox": [ + 69, + 301, + 541, + 325 + ], + "type": "text", + "content": "Lily Weng. Llm-powered autonomous agents. *Github*, 2023. URL https://lilianweng.github.io/posts/2023-06-23-agent/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 335, + 541, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 335, + 541, + 360 + ], + "spans": [ + { + "bbox": [ + 70, + 335, + 541, + 360 + ], + "type": "text", + "content": "Martin Weyssow, Aton Kamanda, and Houari A. Sahraoui. Codeultrafeedback: An llm-as-a-judge dataset for aligning large language models to coding preferences. CoRR, abs/2403.09032, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 370, + 541, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 370, + 541, + 395 + ], + "spans": [ + { + "bbox": [ + 69, + 370, + 541, + 395 + ], + "type": "text", + "content": "Sarah Wegreffe, Ana Marasovic, and Noah A Smith. Measuring association between labels and free-text rationales. arXiv preprint arXiv:2010.12762, 2020." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 404, + 541, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 404, + 541, + 477 + ], + "spans": [ + { + "bbox": [ + 70, + 404, + 541, + 477 + ], + "type": "text", + "content": "Sarah Wiegrefe, Jack Hessel, Swabha Swayamdipta, Mark Riedl, and Yejin Choi. Reframing human-AI collaboration for generating free-text explanations. In Marine Carpuat, Marie-Catherine de Marneffe, and Ivan Vladimir Meza Ruiz (eds.), Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 632-658, Seattle, United States, July 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.naacl-main.47. URL https://aclanthology.org/2022.naacl-main.47/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 487, + 541, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 487, + 541, + 511 + ], + "spans": [ + { + "bbox": [ + 70, + 487, + 541, + 511 + ], + "type": "text", + "content": "Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8:229-256, 1992." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 521, + 541, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 541, + 556 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 541, + 556 + ], + "type": "text", + "content": "Yuhuai Wu, Albert Jiang, Wenda Li, Markus Rabe, Charles Staats, Mateja Jamnik, and Christian Szegedy. Autoformalization with large language models. In Neural Information Processing Systems (NeurIPS), 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 567, + 541, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 567, + 541, + 651 + ], + "spans": [ + { + "bbox": [ + 70, + 567, + 541, + 651 + ], + "type": "text", + "content": "Zhaofeng Wu, Linlu Qiu, Alexis Ross, Ekin Akyurek, Boyuan Chen, Bailin Wang, Najoung Kim, Jacob Andreas, and Yoon Kim. Reasoning or reciting? exploring the capabilities and limitations of language models through counterfactual tasks. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1819-1862, Mexico City, Mexico, June 2024a. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.102. URL https://aclanthology.org/2024.naacl-long.102/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 662, + 541, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 662, + 541, + 687 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 541, + 687 + ], + "type": "text", + "content": "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Enhancing mathematical reasoning in llms by stepwise correction. arXiv preprint arXiv:2410.12934, 2024b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 696, + 541, + 732 + ], + "type": "text", + "content": "Zijian Wu, Suozhi Huang, Zhejian Zhou, Huaiyuan Ying, Jiayu Wang, Dahua Lin, and Kai Chen. Internl m2. 5-stepprover: Advancing automated theorem proving via expert iteration on large-scale lean problems. arXiv preprint arXiv:2410.15700, 2024c." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "68" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 67 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "text", + "content": "Zhiheng Xi, Wenxiang Chen, Xin Guo, Wei He, Yiwen Ding, Boyang Hong, Ming Zhang, Junzhe Wang, Senjie Jin, Enyu Zhou, Rui Zheng, Xiaoran Fan, Xiao Wang, Limao Xiong, Yuhao Zhou, Weiran Wang, Changhao Jiang, Yicheng Zou, Xiangyang Liu, Zhangyue Yin, Shihan Dou, Rongxiang Weng, Wensen Cheng, Qi Zhang, Wenjuan Qin, Yongyan Zheng, Xipeng Qiu, Xuanjing Huang, and Tao Gui. The rise and potential of large language model based agents: A survey. arXiv preprint arXiv:2309.07864, 2023." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 152, + 541, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 152, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 70, + 152, + 541, + 213 + ], + "type": "text", + "content": "Zhiheng Xi, Dingwen Yang, Jixuan Huang, Jiafu Tang, Guanyu Li, Yiwen Ding, Wei He, Boyang Hong, Shihan Do, Wenyu Zhan, Xiao Wang, Rui Zheng, Tao Ji, Xiaowei Shi, Yitao Zhai, Rongxiang Weng, Jingang Wang, Xunliang Cai, Tao Gui, Zuxuan Wu, Qi Zhang, Xipeng Qiu, Xuanjing Huang, and YuGang Jiang. Enhancing llm reasoning via critique models with test-time and training-time supervision, 2024. URL https://arxiv.org/abs/2411.16579." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 222, + 541, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 222, + 541, + 247 + ], + "spans": [ + { + "bbox": [ + 70, + 222, + 541, + 247 + ], + "type": "text", + "content": "Sang Michael Xie, Aditi Raghunathan, Percy Liang, and Tengyu Ma. An explanation of in-context learning as implicit bayesian inference. In International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 256, + 541, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 256, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 70, + 256, + 541, + 281 + ], + "type": "text", + "content": "Zhihui Xie, Liyu Chen, Weichao Mao, Jingjing Xu, Lingpeng Kong, et al. Teaching language models to critique via reinforcement learning. arXiv preprint arXiv:2502.03492, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 290, + 541, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 290, + 541, + 338 + ], + "spans": [ + { + "bbox": [ + 70, + 290, + 541, + 338 + ], + "type": "text", + "content": "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. Deepseek-prover: Advancing theorem proving in llms through large-scale synthetic data. CoRR, abs/2405.14333, 2024a. doi: 10.48550/ARXIV.2405.14333. URL https://doi.org/10.48550/arXiv.2405.14333." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 348, + 541, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 348, + 541, + 396 + ], + "spans": [ + { + "bbox": [ + 70, + 348, + 541, + 396 + ], + "type": "text", + "content": "Huajian Xin, ZZ Ren, Junxiao Song, Zhihong Shao, Wanjia Zhao, Haocheng Wang, Bo Liu, Liyue Zhang, Xuan Lu, Qiushi Du, et al. Deepseek-prover-v1. 
5: Harnessing proof assistant feedback for reinforcement learning and monte-carlo tree search. arXiv preprint arXiv:2408.08152, 2024b. URL https://arxiv.org/abs/2408.08152." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 406, + 541, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 406, + 541, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 406, + 541, + 431 + ], + "type": "text", + "content": "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 441, + 541, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 541, + 476 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 541, + 476 + ], + "type": "text", + "content": "Austin Xu, Srijan Bansal, Yifei Ming, Semih Yavuz, and Shafiq Joty. Does context matter? contextual judgebench for evaluating llm-based judges in contextual settings. arXiv preprint arXiv:2503.15620, 2025a." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 487, + 541, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 487, + 541, + 512 + ], + "spans": [ + { + "bbox": [ + 70, + 487, + 541, + 512 + ], + "type": "text", + "content": "Austin Xu, Yilun Zhou, Xuan-Phi Nguyen, Caiming Xiong, and Shafiq Joty. J4r: Learning to judge with equivalent initial state group relative policy optimization. arXiv preprint arXiv:2505.13346, 2025b." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 521, + 541, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 541, + 570 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 541, + 570 + ], + "type": "text", + "content": "Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, Qingwei Lin, and Daxin Jiang. 
Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 579, + 541, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 579, + 541, + 616 + ], + "spans": [ + { + "bbox": [ + 70, + 579, + 541, + 616 + ], + "type": "text", + "content": "Fangzhi Xu, Qika Lin, Jiawei Han, Tianzhe Zhao, Jun Liu, and Erik Cambria. Are large language models really good logical reasoners? a comprehensive evaluation and beyond. IEEE Transactions on Knowledge and Data Engineering, 2025c." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 625, + 541, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 625, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 70, + 625, + 541, + 662 + ], + "type": "text", + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686, 2025d." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 70, + 672, + 541, + 732 + ], + "type": "text", + "content": "Hanwei Xu, Yujun Chen, Yulun Du, Nan Shao, Wang Yanggang, Haiyu Li, and Zhilin Yang. GPS: Genetic prompt search for efficient few-shot learning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 8162-8171, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.559. 
URL https://aclanthology.org/2022.emnlp-main.559/." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "69" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 68 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 731 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "type": "text", + "content": "Haotian Xu, Xing Wu, Weinong Wang, Zhongzhi Li, Da Zheng, Boyuan Chen, Yi Hu, Shijia Kang, Jiaming Ji, Yingying Zhang, et al. Redstar: Does scaling long-cot data unlock better slow-reasoning systems? arXiv preprint arXiv:2501.11284, 2025e." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 124, + 541, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 124, + 541, + 162 + ], + "spans": [ + { + "bbox": [ + 69, + 124, + 541, + 162 + ], + "type": "text", + "content": "Kehan Xu, Kun Zhang, Jingyuan Li, Wei Huang, and Yuanzhuo Wang. Crp-rag: A retrieval-augmented generation framework for supporting complex logical reasoning and knowledge planning. _Electronics_, 14 (1):47, 2024b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 166, + 541, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 166, + 541, + 203 + ], + "spans": [ + { + "bbox": [ + 69, + 166, + 541, + 203 + ], + "type": "text", + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. 
A survey on knowledge distillation of large language models. arXiv preprint arXiv:2402.13116, 2024c." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 209, + 539, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 209, + 539, + 235 + ], + "spans": [ + { + "bbox": [ + 70, + 209, + 539, + 235 + ], + "type": "text", + "content": "Yige Xu, Xu Guo, Zhiwei Zeng, and Chunyan Miao. SoftCoT: Soft chain-of-thought for efficient reasoning with llms. arXiv preprint arXiv:2502.12134, 2025f." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 239, + 539, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 239, + 539, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 239, + 539, + 277 + ], + "type": "text", + "content": "Zhiwei Xu, Yunpeng Bai, Bin Zhang, Dapeng Li, and Guoliang Fan. Haven: Hierarchical cooperative multiagent reinforcement learning with dual coordination mechanism. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11735-11743, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 282, + 539, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 282, + 539, + 319 + ], + "spans": [ + { + "bbox": [ + 70, + 282, + 539, + 319 + ], + "type": "text", + "content": "Yuchen Yan, Jin Jiang, Yang Liu, Yixin Cao, Xin Xu, Xunliang Cai, Jian Shao, et al. S3c-math: Spontaneous step-level self-correction makes large language models better mathematical reasoners. arXiv preprint arXiv:2409.01524, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 325, + 539, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 325, + 539, + 350 + ], + "spans": [ + { + "bbox": [ + 70, + 325, + 539, + 350 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. Qwen2 technical report. 
arXiv preprint arXiv:2407.10671, 2024a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 356, + 539, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 356, + 539, + 392 + ], + "spans": [ + { + "bbox": [ + 70, + 356, + 539, + 392 + ], + "type": "text", + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 398, + 539, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 398, + 539, + 435 + ], + "spans": [ + { + "bbox": [ + 70, + 398, + 539, + 435 + ], + "type": "text", + "content": "Chengrun Yang, Xuezhi Wang, Yifeng Lu, Hanxiao Liu, Quoc V Le, Denny Zhou, and Xinyun Chen. Large language models as optimizers. In The Twelfth International Conference on Learning Representations, 2024c. URL https://openreview.net/forum?id=Bb4VGOWELI." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 441, + 539, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 539, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 539, + 466 + ], + "type": "text", + "content": "Jinghan Yang, Shuming Ma, and Furu Wei. Auto-icl: In-context learning without human supervision. arXiv preprint arXiv:2311.09263, 2023a. URL https://arxiv.org/abs/2311.09263." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 472, + 539, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 472, + 539, + 508 + ], + "spans": [ + { + "bbox": [ + 70, + 472, + 539, + 508 + ], + "type": "text", + "content": "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan Prenger, and Anima Anandkumar. 
LeanDojo: Theorem proving with retrieval-augmented language models. In Neural Information Processing Systems (NeurIPS), 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 514, + 539, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 514, + 539, + 540 + ], + "spans": [ + { + "bbox": [ + 70, + 514, + 539, + 540 + ], + "type": "text", + "content": "Kaiyu Yang, Gabriel Poesia, Jingxuan He, Wenda Li, Kristin Lauter, Swarat Chaudhuri, and Dawn Song. Formal mathematical reasoning: A new frontier in ai. arXiv preprint arXiv:2412.16075, 2024d." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 544, + 539, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 544, + 539, + 581 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 539, + 581 + ], + "type": "text", + "content": "Ruihan Yang, Jiangjie Chen, Yikai Zhang, Siyu Yuan, Aili Chen, Kyle Richardson, Yanghua Xiao, and Deqing Yang. Selfgoal: Your language agents already know how to achieve high-level goals. arXiv preprint arXiv:2406.04784, 2024e." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 586, + 539, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 586, + 539, + 648 + ], + "spans": [ + { + "bbox": [ + 70, + 586, + 539, + 648 + ], + "type": "text", + "content": "Zonglin Yang, Li Dong, Xinya Du, Hao Cheng, Erik Cambria, Xiaodong Liu, Jianfeng Gao, and Furu Wei. Language models as inductive reasoners. In Yvette Graham and Matthew Purver (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 209-225, St. Julian's, Malta, March 2024f. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-long.13/." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 654, + 539, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 654, + 539, + 679 + ], + "spans": [ + { + "bbox": [ + 70, + 654, + 539, + 679 + ], + "type": "text", + "content": "Shunyu Yao and Karthik Narasimhan. Language agents in the digital world: Opportunities and risks. _princeton-nlp.github.io_, Jul 2023. URL https://princeton-nlp.github.io/language-agent-impact/." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 684, + 539, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 684, + 539, + 731 + ], + "spans": [ + { + "bbox": [ + 70, + 684, + 539, + 731 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik R Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. In Thirty-seventh Conference on Neural Information Processing Systems, 2023a. URL https://openreview.net/forum?id=5Xc1ecx01h." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "70" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 69 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 734 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 69, + 81, + 541, + 119 + ], + "type": "text", + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. Re-Act: Synergizing reasoning and acting in language models. 
In International Conference on Learning Representations (ICLR), 2023b." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 541, + 165 + ], + "type": "text", + "content": "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, et al. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv preprint arXiv:2308.02151, 2023c." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 172, + 541, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 172, + 541, + 211 + ], + "spans": [ + { + "bbox": [ + 69, + 172, + 541, + 211 + ], + "type": "text", + "content": "Michihiro Yasunaga, Xinyun Chen, Yujia Li, Panupong Pasupat, Jure Leskovec, Percy Liang, Ed H. Chi, and Denny Zhou. Large language models as analogical reasoners. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AgDICX1h50." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 217, + 541, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 217, + 541, + 256 + ], + "spans": [ + { + "bbox": [ + 69, + 217, + 541, + 256 + ], + "type": "text", + "content": "He Ye, Matias Martinez, Xiapu Luo, Tao Zhang, and Martin Monperrus. Selfapr: Self-supervised program repair with test execution diagnostics. In Proceedings of the 37th IEEE/ACM International Conference on Automated Software Engineering, pp. 1-13, 2022. URL https://arxiv.org/abs/2203.12755." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 262, + 541, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 262, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 69, + 262, + 541, + 289 + ], + "type": "text", + "content": "Jiacheng Ye, Zhiyong Wu, Jiangtao Feng, Tao Yu, and Lingpeng Kong. Compositional exemplars for in-context learning. In International Conference on Machine Learning, pp. 39818-39833. PMLR, 2023a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 296, + 541, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 296, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 69, + 296, + 541, + 323 + ], + "type": "text", + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning. arXiv preprint arXiv:2502.03387, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 330, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 330, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 69, + 330, + 541, + 392 + ], + "type": "text", + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023b. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 399, + 541, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 399, + 541, + 460 + ], + "spans": [ + { + "bbox": [ + 69, + 399, + 541, + 460 + ], + "type": "text", + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. 
Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023c. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 468, + 541, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 468, + 541, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 468, + 541, + 529 + ], + "type": "text", + "content": "Yunhu Ye, Binyuan Hui, Min Yang, Binhua Li, Fei Huang, and Yongbin Li. Large language models are versatile decomposers: Decomposing evidence and questions for table-based reasoning. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, pp. 174-184, New York, NY, USA, 2023d. Association for Computing Machinery. ISBN 9781450394086. doi: 10.1145/3539618.3591708. URL https://doi.org/10.1145/3539618.3591708." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 537, + 541, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 537, + 541, + 574 + ], + "spans": [ + { + "bbox": [ + 69, + 537, + 541, + 574 + ], + "type": "text", + "content": "Ziyi Ye, Xiangsheng Li, Qiuchi Li, Qingyao Ai, Yujia Zhou, Wei Shen, Dong Yan, and Yiqun Liu. Beyond scalar reward model: Learning generative judge from preference data. arXiv preprint arXiv:2410.03742, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 582, + 541, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 582, + 541, + 609 + ], + "spans": [ + { + "bbox": [ + 69, + 582, + 541, + 609 + ], + "type": "text", + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 
Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 616, + 541, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 616, + 541, + 655 + ], + "spans": [ + { + "bbox": [ + 69, + 616, + 541, + 655 + ], + "type": "text", + "content": "Shuo Yin, Weihao You, Zhilong Ji, Guoqiang Zhong, and Jinfeng Bai. Mumath-code: Combining tool-use large language models with multi-perspective data augmentation for mathematical reasoning. arXiv preprint arXiv:2405.07551, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 662, + 541, + 689 + ], + "type": "text", + "content": "Fei Yu, Hongbo Zhang, Prayag Tiwari, and Benyou Wang. Natural language reasoning, a survey. ACM Computing Surveys, 56(12):1-39, 2024a." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 695, + 541, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 695, + 541, + 734 + ], + "spans": [ + { + "bbox": [ + 69, + 695, + 541, + 734 + ], + "type": "text", + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023a." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "71" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 70 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 119 + ], + "type": "text", + "content": "Longhui Yu, Weisen Jiang, Han Shi, YU Jincheng, Zhengying Liu, Yu Zhang, James Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. MetaMath: Bootstrap your own mathematical questions for large language models. In International Conference on Learning Representations (ICLR), 2024b." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 125, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 541, + 175 + ], + "type": "text", + "content": "Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T. Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024c." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 181, + 541, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 181, + 541, + 254 + ], + "spans": [ + { + "bbox": [ + 70, + 181, + 541, + 254 + ], + "type": "text", + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, Haibin Lin, Zhiqi Lin, Bole Ma, Guangming Sheng, Yuxuan Tong, Chi Zhang, Mofan Zhang, Wang Zhang, Hang Zhu, Jinhua Zhu, Jiaze Chen, Jiangjie Chen, Chengyi Wang, Hongli Yu, Weinan Dai, Yuxuan Song, Xiangpeng Wei, Hao Zhou, Jingjing Liu, Wei-Ying Ma, Ya-Qin Zhang, Lin Yan, Mu Qiao, Yonghui Wu, and Mingxuan Wang. Dapo: An open-source llm reinforcement learning system at scale, 2025. URL https://arxiv.org/abs/2503.14476." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 261, + 541, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 261, + 541, + 286 + ], + "spans": [ + { + "bbox": [ + 70, + 261, + 541, + 286 + ], + "type": "text", + "content": "Zhouliang Yu, Jie Fu, Yao Mu, Chenguang Wang, Lin Shao, and Yaodong Yang. Multireact: Multimodal tools augmented reasoning-acting traces for embodied agent planning. 2023b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 293, + 541, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 293, + 541, + 329 + ], + "spans": [ + { + "bbox": [ + 70, + 293, + 541, + 329 + ], + "type": "text", + "content": "Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Wei Ye, Jindong Wang, Xing Xie, Yue Zhang, and Shikun Zhang. Kieval: A knowledge-grounded interactive evaluation framework for large language models. arXiv preprint arXiv:2402.15043, 2024d." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 337, + 541, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 337, + 541, + 385 + ], + "spans": [ + { + "bbox": [ + 70, + 337, + 541, + 385 + ], + "type": "text", + "content": "Lifan Yuan, Ganqu Cui, Hanbin Wang, Ning Ding, Xingyao Wang, Jia Deng, Boji Shan, Huimin Chen, Ruobing Xie, Yankai Lin, Zhenghao Liu, Bowen Zhou, Hao Peng, Zhiyuan Liu, and Maosong Sun. Advancing LLM reasoning generalists with preference trees. CoRR, abs/2404.02078, 2024a. doi: 10.48550/ARXIV.2404.02078. URL https://doi.org/10.48550/arXiv.2404.02078." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 392, + 541, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 392, + 541, + 428 + ], + "spans": [ + { + "bbox": [ + 70, + 392, + 541, + 428 + ], + "type": "text", + "content": "Lifan Yuan, Wendi Li, Huayu Chen, Ganqu Cui, Ning Ding, Kaiyan Zhang, Bowen Zhou, Zhiyuan Liu, and Hao Peng. Free process rewards without process labels, 2024b. URL https://arxiv.org/abs/2412.01981." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 436, + 541, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 436, + 541, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 436, + 541, + 462 + ], + "type": "text", + "content": "Siyu Yuan, Kaitao Song, Jiangjie Chen, Xu Tan, Dongsheng Li, and Deqing Yang. Evoagent: Towards automatic multi-agent generation via evolutionary algorithms. arXiv preprint arXiv:2406.14228, 2024c." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 468, + 541, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 468, + 541, + 494 + ], + "spans": [ + { + "bbox": [ + 70, + 468, + 541, + 494 + ], + "type": "text", + "content": "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. 
arXiv preprint arXiv:2401.10020, 2024d." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 500, + 541, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 500, + 541, + 537 + ], + "spans": [ + { + "bbox": [ + 70, + 500, + 541, + 537 + ], + "type": "text", + "content": "Zheng Yuan, Hongyi Yuan, Chengpeng Li, Guanting Dong, Chuanqi Tan, and Chang Zhou. Scaling relationship on learning mathematical reasoning with large language models. CoRR, abs/2308.01825, 2023. doi: 10.48550/ARXIV.2308.01825. URL https://doi.org/10.48550/arXiv.2308.01825." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 544, + 541, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 544, + 541, + 582 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 541, + 582 + ], + "type": "text", + "content": "Murong Yue, Wenlin Yao, Haitao Mi, Dian Yu, Ziyu Yao, and Dong Yu. DOTS: Learning to reason dynamically in LLMs via optimal reasoning trajectories search. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tn2mjzjSyR." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 588, + 541, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 588, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 588, + 541, + 613 + ], + "type": "text", + "content": "Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 620, + 541, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 620, + 541, + 658 + ], + "spans": [ + { + "bbox": [ + 70, + 620, + 541, + 658 + ], + "type": "text", + "content": "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. STar: Bootstrapping reasoning with reasoning. In Alice H. 
Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=3ELRdg2sgI." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 664, + 541, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 664, + 541, + 700 + ], + "spans": [ + { + "bbox": [ + 69, + 664, + 541, + 700 + ], + "type": "text", + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. arXiv preprint arXiv:2412.14135, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 541, + 733 + ], + "type": "text", + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024a." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "72" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 71 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 733 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 130 + ], + "type": "text", + "content": "Di Zhang, Jianbo Wu, Jingdi Lei, Tong Che, Jiatong Li, Tong Xie, Xiaoshui Huang, Shufei Zhang, Marco Pavone, Yuqiang Li, Wanli Ouyang, and Dongzhan Zhou. Llama-berry: Pairwise optimization for o1-like olympiad-level mathematical reasoning. CoRR, abs/2410.02884, 2024b. URL https://doi.org/10.48550/arXiv.2410.02884." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 137, + 541, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 137, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 70, + 137, + 541, + 175 + ], + "type": "text", + "content": "Jiayi Zhang, Jinyu Xiang, Zhaoyang Yu, Fengwei Teng, Xionghui Chen, Jiaqi Chen, Mingchen Zhuge, Xin Cheng, Sirui Hong, Jinlin Wang, Bingnan Zheng, Bang Liu, Yuyu Luo, and Chenglin Wu. Aflow: Automating agentic workflow generation, 2024c. URL https://arxiv.org/abs/2410.10762." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 180, + 541, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 180, + 541, + 206 + ], + "spans": [ + { + "bbox": [ + 70, + 180, + 541, + 206 + ], + "type": "text", + "content": "Jun Zhang, Trey Hedden, and Adrian Chia. 
Perspective-taking and depth of theory-of-mind reasoning in sequential-move games. Cognitive science, 36(3):560-573, 2012." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 213, + 541, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 213, + 541, + 239 + ], + "spans": [ + { + "bbox": [ + 70, + 213, + 541, + 239 + ], + "type": "text", + "content": "Kexun Zhang, Shang Zhou, Danqing Wang, William Yang Wang, and Lei Li. Scaling llm inference with optimized sample compute allocation. arXiv preprint arXiv:2410.22480, 2024d." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 244, + 541, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 244, + 541, + 283 + ], + "spans": [ + { + "bbox": [ + 70, + 244, + 541, + 283 + ], + "type": "text", + "content": "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. In The 4th Workshop on Mathematical Reasoning and AI at NeurIPS'24, 2024e. URL https://openreview.net/forum?id=CxHRoTLmPX." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 289, + 541, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 289, + 541, + 315 + ], + "spans": [ + { + "bbox": [ + 69, + 289, + 541, + 315 + ], + "type": "text", + "content": "Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240, 2024f." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 320, + 541, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 320, + 541, + 346 + ], + "spans": [ + { + "bbox": [ + 70, + 320, + 541, + 346 + ], + "type": "text", + "content": "Qizhen Zhang, Chris Lu, Animesh Garg, and Jakob Foerster. Centralized model and exploration policy for multi-agent rl. 
arXiv preprint arXiv:2107.06434, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 352, + 541, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 352, + 541, + 390 + ], + "spans": [ + { + "bbox": [ + 70, + 352, + 541, + 390 + ], + "type": "text", + "content": "Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. Finagent: A multimodal foundation agent for financial trading: Tool-augmented, diversified, and generalist. arXiv preprint arXiv:2402.18485, 2024g." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 396, + 541, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 396, + 541, + 434 + ], + "spans": [ + { + "bbox": [ + 70, + 396, + 541, + 434 + ], + "type": "text", + "content": "Xuan Zhang, Chao Du, Tianyu Pang, Qian Liu, Wei Gao, and Min Lin. Chain of preference optimization: Improving chain-of-thought reasoning in llms. CoRR, abs/2406.09136, 2024h. doi: 10.48550/ARXIV.2406.09136. URL https://doi.org/10.48550/arXiv.2406.09136." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 70, + 440, + 541, + 466 + ], + "type": "text", + "content": "Xuanliang Zhang, Dingzirui Wang, Longxu Dou, Qingfu Zhu, and Wanxiang Che. A survey of table reasoning with large language models. Frontiers of Computer Science, 19(9):199348, 2025a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 472, + 541, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 472, + 541, + 508 + ], + "spans": [ + { + "bbox": [ + 70, + 472, + 541, + 508 + ], + "type": "text", + "content": "Yufeng Zhang, Fengzhuo Zhang, Zhuoran Yang, and Zhaoran Wang. What and how does in-context learning learn? 
bayesian model averaging, parameterization, and generalization. arXiv preprint arXiv:2305.19420, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 516, + 541, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 516, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 70, + 516, + 541, + 578 + ], + "type": "text", + "content": "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 15637-15653, Bangkok, Thailand, August 2024i. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.924. URL https://aclanthology.org/2024-findings-acl.924/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 583, + 541, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 583, + 541, + 658 + ], + "spans": [ + { + "bbox": [ + 70, + 583, + 541, + 658 + ], + "type": "text", + "content": "Zhehao Zhang, Yan Gao, and Jian-Guang Lou. " + }, + { + "bbox": [ + 70, + 583, + 541, + 658 + ], + "type": "inline_equation", + "content": "e^5" + }, + { + "bbox": [ + 70, + 583, + 541, + 658 + ], + "type": "text", + "content": ": Zero-shot hierarchical table analysis using augmented LLMs via explain, extract, execute, exhibit and extrapolate. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1244-1258, Mexico City, Mexico, June 2024j. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.68. URL https://aclanthology.org/2024.naacl-long.68/." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 663, + 541, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 663, + 541, + 701 + ], + "spans": [ + { + "bbox": [ + 70, + 663, + 541, + 701 + ], + "type": "text", + "content": "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning. arXiv preprint arXiv:2501.07301, 2025b." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 707, + 541, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 707, + 541, + 733 + ], + "spans": [ + { + "bbox": [ + 70, + 707, + 541, + 733 + ], + "type": "text", + "content": "Ruochen Zhao, Xingxuan Li, Shafiq Joty, Chengwei Qin, and Lidong Bing. Verify-and-edit: A knowledge-enhanced chain-of-thought framework. arXiv preprint arXiv:2305.03268, 2023." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "73" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 72 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 81, + 541, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 118 + ], + "type": "text", + "content": "Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. Processbench: Identifying process errors in mathematical reasoning. CoRR, abs/2412.06559, 2024. 
URL https://arxiv.org/abs/2412.06559." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 127, + 541, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 127, + 541, + 213 + ], + "spans": [ + { + "bbox": [ + 70, + 127, + 541, + 213 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric P. Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. Judging llmas-a-judge with mt-bench and chatbot arena. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023a. URL http://papers.nips.cc/paper_files/paper/2023/bit/91f18a1287b398d378ef22505bf41832-AAbstract-Datasets_and_Benchmarks.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 221, + 541, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 221, + 541, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 221, + 541, + 257 + ], + "type": "text", + "content": "Rui Zheng, Shihan Dou, Songyang Gao, Yuan Hua, Wei Shen, Binghai Wang, Yan Liu, Senjie Jin, Qin Liu, Yuhao Zhou, et al. Secrets of rlhf in large language models part i: Ppo. arXiv preprint arXiv:2307.04964, 2023b." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 267, + 541, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 267, + 541, + 317 + ], + "spans": [ + { + "bbox": [ + 70, + 267, + 541, + 317 + ], + "type": "text", + "content": "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning, acting, and planning in language models. 
In Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 62138-62160. PMLR, 21-27 Jul 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 325, + 541, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 325, + 541, + 375 + ], + "spans": [ + { + "bbox": [ + 70, + 325, + 541, + 375 + ], + "type": "text", + "content": "Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V Le, and Ed H. Chi. Least-to-most prompting enables complex reasoning in large language models. In The Eleventh International Conference on Learning Representations, 2023a. URL https://openreview.net/forum?id=WZH7099tgfM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 384, + 541, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 541, + 421 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 541, + 421 + ], + "type": "text", + "content": "Han Zhou, Xingchen Wan, Ruoxi Sun, Hamid Palangi, Shariq Iqbal, Ivan Vulic, Anna Korhonen, and Sercan Ö. Ark. Multi-agent design: Optimizing agents with better prompts and topologies, 2025a. URL https://arxiv.org/abs/2502.02533." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "spans": [ + { + "bbox": [ + 70, + 430, + 541, + 479 + ], + "type": "text", + "content": "Pei Zhou, Jay Pujara, Xiang Ren, Xinyun Chen, Heng-Tze Cheng, Quoc V Le, Ed H. Chi, Denny Zhou, Swaroop Mishra, and Steven Zheng. SELF-DISCOVER: Large language models self-compose reasoning structures. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=BR0vXhmzYK." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 488, + 541, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 488, + 541, + 524 + ], + "spans": [ + { + "bbox": [ + 70, + 488, + 541, + 524 + ], + "type": "text", + "content": "Yilun Zhou, Austin Xu, Peifeng Wang, Caiming Xiong, and Shafiq Joty. Evaluating judges as evaluators: The jetst's benchmark of llm-as-judges as test-time scaling evaluators. arXiv preprint arXiv:2504.15253, 2025b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 534, + 541, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 534, + 541, + 572 + ], + "spans": [ + { + "bbox": [ + 70, + 534, + 541, + 572 + ], + "type": "text", + "content": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. Large language models are human-level prompt engineers. In The Eleventh International Conference on Learning Representations, 2023b. URL https://openreview.net/forum?id=92gvk82DE-." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 580, + 541, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 580, + 541, + 618 + ], + "spans": [ + { + "bbox": [ + 70, + 580, + 541, + 618 + ], + "type": "text", + "content": "Yuxiang Zhou, Jiazheng Li, Yanzheng Xiang, Hanqi Yan, Lin Gui, and Yulan He. The mystery of in-context learning: A comprehensive survey on interpretation and analysis. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pp. 14365-14378, 2024c." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 627, + 541, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 627, + 541, + 665 + ], + "spans": [ + { + "bbox": [ + 70, + 627, + 541, + 665 + ], + "type": "text", + "content": "Qihao Zhu, Daya Guo, Zhihong Shao, Dejian Yang, Peiyi Wang, Runxin Xu, Y Wu, Yukun Li, Huazuo Gao, Shirong Ma, et al. 
Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence. arXiv preprint arXiv:2406.11931, 2024a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 673, + 541, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 673, + 541, + 699 + ], + "spans": [ + { + "bbox": [ + 69, + 673, + 541, + 699 + ], + "type": "text", + "content": "Ying Zhu, Shengchang Li, Ziqian Kong, and Peilan Xu. Graph retrieval augmented trustworthiness reasoning. arXiv preprint arXiv:2408.12333, 2024b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 707, + 541, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 707, + 541, + 732 + ], + "spans": [ + { + "bbox": [ + 69, + 707, + 541, + 732 + ], + "type": "text", + "content": "Mingchen Zhuge, Wenyi Wang, Louis Kirsch, Francesco Faccio, Dmitrii Khizbullin, and Jürgen Schmidhuber. Language agents as estimizable graphs, 2024. URL https://arxiv.org/abs/2402.16823." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "74" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 73 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 218 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "spans": [ + { + "bbox": [ + 70, + 81, + 541, + 143 + ], + "type": "text", + "content": "Jingming Zhuo, Songyang Zhang, Xinyu Fang, Haodong Duan, Dahua Lin, and Kai Chen. ProSA: Assessing and understanding the prompt sensitivity of LLMs. 
In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Findings of the Association for Computational Linguistics: EMNLP 2024, pp. 1950-1976, Miami, Florida, USA, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-emnlp.108. URL https://aclanthology.org/2024 findings-emnlp.108/." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 149, + 541, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 149, + 541, + 186 + ], + "spans": [ + { + "bbox": [ + 70, + 149, + 541, + 186 + ], + "type": "text", + "content": "Daniel M. Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B. Brown, Alec Radford, Dario Amodei, Paul F. Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. CoRR, abs/1909.08593, 2019. URL http://arxiv.org/abs/1909.08593." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 193, + 541, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 193, + 541, + 218 + ], + "spans": [ + { + "bbox": [ + 70, + 193, + 541, + 218 + ], + "type": "text", + "content": "Kaijian Zou, Muhammad Khalifa, and Lu Wang. Retrieval or global context understanding? on many-shot in-context learning for long-context evaluation. arXiv preprint arXiv:2411.07130, 2024." 
+ } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "75" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 74 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_content_list.json b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1133055e8242f666b461fbdbb6f30d6fae8d25b9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_content_list.json @@ -0,0 +1,1525 @@ +[ + { + "type": "text", + "text": "VisuoThink: Empowering LVLM Reasoning with Multimodal Tree Search", + "text_level": 1, + "bbox": [ + 115, + 89, + 882, + 111 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yikun Wang $^{12}$ , Siyin Wang $^{12}$ , Qinyuan Cheng $^{1}$ , Zhaoye Fei $^{1}$ , Liang Ding $^{3}$ , Qipeng Guo $^{24}$ , Dacheng Tao $^{5}$ , Xipeng Qiu $^{\\dagger 12}$", + "bbox": [ + 159, + 130, + 836, + 165 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Fudan University $^{2}$ Shanghai Innovation Institute", + "bbox": [ + 292, + 171, + 702, + 187 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 The University of Sydney 4 Shanghai AI Laboratory 5 Nanyang Technological University yikunwang19@fudan.edu.cn", + "bbox": [ + 127, + 187, + 867, + 221 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in Large Vision-Language Models have 
showcased remarkable capabilities. However, they often falter when confronted with complex reasoning tasks that humans typically address through visual aids and deliberate, step-by-step thinking. While existing methods have explored text-based slow thinking or rudimentary visual assistance, they fall short of capturing the intricate, interleaved nature of human visual-verbal reasoning processes. To overcome these limitations and inspired by the mechanisms of slow thinking in human cognition, we introduce VisuoThink, a novel framework that seamlessly integrates visuospatial and linguistic domains. VisuoThink facilitates multimodal slow thinking by enabling progressive visual-textual reasoning and incorporates test-time scaling through look-ahead tree search. Extensive experiments demonstrate that VisuoThink significantly enhances reasoning capabilities via inference-time scaling, even without fine-tuning, achieving state-of-the-art performance in tasks involving geometry and spatial reasoning. Our code has been open-sourced at https://github.com/ekonwang/VisuoThink.", + "bbox": [ + 141, + 292, + 460, + 661 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 678, + 258, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in Large Vision-Language Models (LVLMs) (OpenAI, 2024a; Team, 2024) have shown remarkable progress across a variety of tasks. However, these models often struggle with complex reasoning challenges, such as geometric problem-solving (Qiao et al., 2024; Cherian et al., 2024) or spatial reasoning (Ramakrishnan et al., 2024; Wu et al., 2024), where human problem-solving approaches typically rely on visual aids. 
For example, when solving geometry problems, humans often iteratively sketch auxiliary lines or", + "bbox": [ + 112, + 706, + 489, + 883 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 262, + 870, + 307 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 522, + 311, + 870, + 365 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg", + "image_caption": [ + "Figure 1: Illustration of Input-Output Prompting, CoT, Vision-aided Thought and our VisuoThink. Vision-aided Thought often relies on reasoning with one-step or unreliable multi-step visual cues (generated by LVLMs). While VisuoThink addresses this gap through tool-augmented visual hints, coupled with a predictive-rollout search mechanism to systematically optimize reasoning capability." + ], + "image_footnote": [], + "bbox": [ + 522, + 370, + 870, + 505 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "visualize intermediate steps, while exploring different reasoning paths - a form of \"slow thinking\" (Kahneman, 2011) that combines visual and verbal cognitive processes.", + "bbox": [ + 507, + 645, + 882, + 709 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the success of o1 series models (OpenAI, 2024b), researchers have explored language as a medium for implementing slow thinking, coupled with test-time scaling techniques (Zeng et al., 2024). Given the inherently multimodal nature of reality, early efforts (Xu et al., 2024; Thawakar et al., 2025; Yao et al., 2024; Du et al., 2025) have attempted to extend such deliberative thinking to multimodal reasoning. 
However, even augmented with search strategy, these methods treat visual information merely as static input, relying solely on textual reasoning chains during the reasoning process - creating a \"visual blind spot\", where the", + "bbox": [ + 507, + 711, + 882, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09130v1 [cs.CL] 12 Apr 2025", + "bbox": [ + 21, + 307, + 60, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "\\*Yikun and Siyin contributed equally", + "bbox": [ + 136, + 892, + 364, + 907 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding Author", + "bbox": [ + 136, + 907, + 282, + 920 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "potential for visual information throughout the reasoning process is largely ignored (Fig. 1a). On the other hand, while approaches like VisualSketchpad (Hu et al., 2024) and VoT (Wu et al., 2024) have recognized the importance of visual information by incorporating visual aids in reasoning (Fig. 1b), they mainly focus on single-step assistance or simplified visual hints (e.g., emojis). These methods lack the multi-step visual-textual interleaved reasoning process that characterizes human slow thinking, while failing to explore potential search strategies.", + "bbox": [ + 112, + 84, + 489, + 275 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these limitations, we propose VisuoThink, a multimodal tree search framework that systematically explores multiple reasoning paths with vision-text interleaved thinking at each step. Unlike previous approaches, Visuothink (Fig. 1c) enables multimodal slow thinking through two key innovations: (1) a step-by-step vision-text interleaved reasoning framework that dynamically utilizes multi-step visual aids from tool uses, and (2) a look-ahead tree search algorithm that explores multiple reasoning paths, enabling test-time scaling of the reasoning process. 
Specifically, our look-ahead tree search incorporates a predictive rollout mechanism that simulates the likely outcomes of different reasoning states. This allows the model to prioritize more promising paths and avoid less ones, guiding the reasoning process toward the optimal solution. Through this test-time scaling capability, the model can thoroughly explore and optimize reasoning paths dynamically during inference.", + "bbox": [ + 115, + 278, + 489, + 599 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our empirical evaluation demonstrates that Visuothink significantly outperforms existing methods across various reasoning tasks, particularly in geometry and spatial reasoning domains. On Geomeverse, Our methods achieves an accuracy@1 as high as $48.5\\%$ , with an improvement of as high as $21.8\\%$ over the state-of-the-art baseline, which particularly shows strong performance of VisuoThink on problems requiring multi-step visual reasoning. Through extensive ablation studies, we show that each component of our framework contributes meaningfully to its overall performance.", + "bbox": [ + 112, + 600, + 489, + 791 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions include:", + "bbox": [ + 132, + 793, + 423, + 808 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel reasoning paradigm, multimodal tree search, for multimodal slow thinking that enables dynamic integration of visual and verbal reasoning paths throughout the problem-solving search process.", + "- We extend test-time scaling methods to the vi" + ], + "bbox": [ + 136, + 815, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "sual domain by proposing a predictive rollout mechanism that explores and optimizes visual reasoning paths by predicting future states.", + "bbox": [ + 544, + 84, + 882, + 134 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We demonstrate 
substantial empirical improvements across multiple reasoning tasks, particularly in geometry and spatial reasoning, with detailed analyses revealing key insights about our approach.", + "bbox": [ + 532, + 146, + 884, + 227 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 509, + 256, + 665, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Text-centric Reasoning in LVLMs", + "text_level": 1, + "bbox": [ + 509, + 282, + 821, + 299 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "With the emergence of o1 models (OpenAI, 2024b), the importance of slow thinking has become increasingly evident (Zeng et al., 2024). Several works have attempted to extend this to LVLMs through methods like stage-wise reasoning (Xuet al., 2024), curriculum learning (Thawakar et al., 2025), tree search-based data generation (Yao et al., 2024), and LLM distillation (Du et al., 2025). However, these methods treat visual information as static input, relying only on textual data during reasoning, which limits their ability to fully leverage multimodal information for complex tasks.", + "bbox": [ + 507, + 305, + 884, + 499 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Vision-aided Reasoning", + "text_level": 1, + "bbox": [ + 509, + 512, + 741, + 527 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent advancements in multimodal reasoning have demonstrated that incorporating visual information provides richer context and hints compared to text-only approaches. Early studies adopted a two-stage approach, where visual information is first transformed and grounded into text (Zhang et al., 2023), graph structures (e.g., scene graphs (Mitra et al., 2023) or knowledge graphs (Mondal et al., 2024)), or bounding boxes (Lei et al., 2024), followed by reasoning. 
Other works leverage existing vision models (e.g., segmentation, detection) to process input images into valuable cues for perception, enabling more precise image-understanding with fine-grained visual information (Yang et al., 2023; Zhou et al., 2024; Gao et al., 2024).", + "bbox": [ + 507, + 533, + 882, + 775 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Another sequence of research focuses on intermediate visual representations to enhance reasoning. For instance, Visual Sketchpad (Hu et al., 2024) employs Python-based drawing tools to generate sketches as intermediate visual aids for geometric problems, while VoT (Wu et al., 2024) formalizes visual thinking by generating emoji-like textual representations. MVOT (Li et al., 2025) fine-tunes multimodal models to generate images", + "bbox": [ + 507, + 777, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg", + "image_caption": [ + "Figure 2: The illustration of our VisuoThink framework with three stages: (1) vision-text interleaved expansion: generates candidate paths through vision-text interleaved thinking; (2) rollout simulation: sample candidate reasoning nodes and then perform look-ahead search to better evaluate the value of current states; (3) selection: selects the most promising path via self-voting with results or states from rollout." + ], + "image_footnote": [], + "bbox": [ + 122, + 85, + 877, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "during reasoning, allowing the model to create visual aids dynamically. Despite these advancements, most existing methods rely on single-step or unreliable visual representations, lacking search mechanisms to test-time scaling through exploring multiple reasoning paths. 
In contrast, we develop a multimodal tree search framework that both leverages multi-step visual cues during reasoning and systematically explores reasoning paths through tree search.", + "bbox": [ + 112, + 370, + 489, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Test-time Scaling with Tree Search", + "text_level": 1, + "bbox": [ + 112, + 542, + 431, + 557 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scaling compute at test time has emerged as a powerful strategy to enhance LLMs' reasoning capabilities without increasing model parameters (Snell et al., 2024). Various approaches including BoN (Gui et al., 2024; Sun et al., 2024; Amini et al., 2024), guided beam search (Xie et al., 2023; Yu et al., 2023), and Monte Carlo Tree Search (MCTS) (Feng et al., 2023; Liu et al., 2023; Chen et al., 2024) have been explored for text models, demonstrating improved performance through different search strategies. However, the exploration of test-time scaling in LVLMs remains limited. Prior work like AtomThink (Xiang et al., 2024) has only investigated basic methods such as beam search, with text-only reasoning chains. In contrast, our method introduces vision-text interleaved thinking with look-ahead search, extending test-time scaling to multimodal reasoning.", + "bbox": [ + 112, + 563, + 489, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 VisuoThink", + "text_level": 1, + "bbox": [ + 112, + 864, + 250, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose VisuoThink, a novel framework for multimodal reasoning that dynamically integrates", + "bbox": [ + 112, + 889, + 487, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "visual and textual information during the inference process. 
At its core, our framework implements multimodal slow thinking through a key mechanism: predictive rollout search that allows models to think ahead.", + "bbox": [ + 507, + 370, + 884, + 451 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Vision-Text Interleaved Thinking", + "text_level": 1, + "bbox": [ + 507, + 464, + 816, + 480 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our framework facilitates vision-text interleaved reasoning through an iterative cycle of Thought, Action, and Observation like existing work (Yao et al., 2023), which enables natural and dynamic interactions with external tools. (1) Thought phase: the model leverages visual information for textual reasoning (such as analyzing patterns based on previously added auxiliary lines) and determines the next step by planning what visual hints should be added to enhance understanding. (2) Action phase: the model executes the planned operations by calling external tools (like using Python code to draw auxiliary lines or highlight key features) to generate or modify visual information. (3) Observation phase: the model processes the visual feedback from the Action phase, incorporating these new visual hints into the next reasoning step.", + "bbox": [ + 507, + 486, + 884, + 759 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The importance of visual information for LVLM reasoning is highlighted in VisuoThink, which utilize tool invocations to construct reliable visual hints step by step in a visual construction process. This tool-based design allows VisuoThink to flexibly adapt to various visual reasoning tasks. Moreover, unlike approaches (e.g. 
VisualSketchpad) that generate all visual aids at once, our step-by-step visual guidance naturally integrates with search techniques, enabling effective test-time scaling.", + "bbox": [ + 507, + 760, + 885, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Predictive Rollout Search", + "text_level": 1, + "bbox": [ + 112, + 84, + 359, + 98 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Based on tree search methods and inspired by MCTS, we propose a predictive rollout search mechanism that interleaves visual-text thinking. By anticipating the outcomes of intermediate states, the model can make timely corrections, enabling more accurate and powerful reasoning. As shown in Figure 2, at each reasoning step, our framework first generates multiple candidate paths through vision-text interleaved thinking, then simulates these paths to predict their outcomes, and finally selects the most promising path through a self-voting mechanism.", + "bbox": [ + 112, + 105, + 489, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Vision-Text Interleaved Expansion In the whole reasoning chain $\\mathbf{A} = \\{\\mathbf{a}_1, \\mathbf{a}_2, \\dots, \\mathbf{a}_t\\}$ , given the current node $\\mathbf{a}_{t-1}$ , the model samples $k$ candidate nodes $\\mathbf{S}_t = \\{\\mathbf{s}_t^1, \\mathbf{s}_t^2, \\dots, \\mathbf{s}_t^k\\}$ . Each candidate follows the vision-text interleaved thinking process described above, generating a sequence of Thought, Action, and Observation steps. This expansion creates a tree of possible reasoning paths, each representing a different problem-solving strategy.", + "bbox": [ + 112, + 303, + 489, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Rollout Simulation Visual reasoning often requires multiple steps to reach a conclusion, making it crucial to evaluate the full potential of each path. 
For each candidate node $\\mathbf{s}_t^i$ , the model simulates the complete reasoning process to predict final outcomes $\\mathbf{r}_t^i$ , rather than relying solely on immediate state evaluation. Different from expansion, the simulation extends each candidate node with a single path of vision-text interleaved thinking until reaching a final result.", + "bbox": [ + 112, + 453, + 489, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Selection The selection of the optimal path is performed through a self-voting mechanism. The model considers the task description, historical nodes, and the simulated path with predicted results for each candidate node. The selection process can be formalized as:", + "bbox": [ + 112, + 618, + 489, + 714 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {S e l e c t} \\left(\\mathbf {S} _ {t}\\right) = \\underset {\\mathbf {s} _ {t} ^ {i} \\in \\mathbf {S} _ {t}} {\\arg \\max } \\mathbf {V o t e} \\left(\\mathbf {A} _ {t - 1}, \\mathbf {s} _ {t} ^ {i}, \\mathbf {r} _ {t} ^ {i}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 164, + 722, + 487, + 746 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathbf{A}_{t - 1}$ represents the historical context, $\\mathbf{s}_t^i$ for the candidate node, and $\\mathbf{r}_t^i$ is the predicted result or final state. The Select is a heuristic function served by the LVLM model to guide the process. 
This selection ensures the model pursues the most promising reasoning strategy.", + "bbox": [ + 112, + 755, + 489, + 852 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Solving Geometry with VisuoThink", + "text_level": 1, + "bbox": [ + 112, + 863, + 453, + 879 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The core of our methodology is rooted in multi-step visual information processing and search-based rea", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "soning, enabling LVLMs to address strongly constrained mathematical problems (e.g., geometry challenges) and open-domain scenarios (such as visual navigation and visual tiling in section 5).", + "bbox": [ + 507, + 84, + 884, + 149 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We formalize geometry problem-solving as a two-phase process integrating visual construction and algebraic computation. In Phase I, the model generates auxiliary lines defined by geometric constraints, such as connecting points $(x_{i},y_{i})$ and $(x_{j},y_{j})$ , construct a perpendicular or parallel line to form line segments $\\mathbf{L} = \\{l_i\\}$ . This phase terminates with a AUX-END token, triggering Phase II, where geometric relationships are translated into solvable equations (e.g., $ax + b = 0$ ) through Python code execution.", + "bbox": [ + 507, + 149, + 884, + 326 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Task Formulation LVLM should produce the reasoning trajectory consisting of reasoning steps $\\mathbf{A} = \\{\\mathbf{a}_t\\}$ that leads to the final result $\\mathbf{r}$ , given the original problem $\\mathbf{Q}$ while taking into account the auxiliary lines $\\mathbf{L}$ . 
The framework operates under a constraint $\\sum_{t=1}^{|A|} \\| \\mathbf{a}_t \\| \\leq \\tau$ , where $\\mathbf{a}_t$ denotes visual-textual reasoning steps and $\\tau$ is the maximum step limit:", + "bbox": [ + 507, + 332, + 884, + 464 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {A} \\sim \\mathcal {P} \\left(\\left\\{\\mathbf {a} _ {1}, \\dots , \\mathbf {a} _ {| A |}, \\mathbf {r} \\right\\} \\mid \\mathbf {Q}, \\mathbf {L}\\right) \\text {s . t .} \\sum_ {t = 1} ^ {| \\mathbf {A} |} \\| \\mathbf {a} _ {i} \\| \\leq \\tau \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 524, + 487, + 882, + 523 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This formulation mirrors human problem-solving by decomposing proofs into executable visual-textual steps, validated via coordinate-based tools like matplotlib and equation solver.", + "bbox": [ + 507, + 537, + 882, + 601 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Visual Construction We emphasize the criticality of incremental visual information for accurate solutions, where multi-step graphical representations originate from the progressive construction of auxiliary lines. This multi-stage approach facilitates search algorithm-enhanced refinement of auxiliary line generation, significantly improving LVLM capabilities in geometric reasoning. Consistent with Sketchpad methodology, we exclusively utilize common Python libraries (e.g., matplotlib) for diagram rendering.", + "bbox": [ + 507, + 608, + 884, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Algebraic Computation Unlike general tasks, solving geometry problems cannot rely solely on visual construction or the model's inherent capabilities; instead, it necessitates the use of computational tools to achieve precise and accurate results. This requirement stems from the need for exact numerical solutions and the mitigation of potential errors in geometric reasoning. 
Through systematic", + "bbox": [ + 507, + 791, + 884, + 921 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/8beb56cc31f29e1a3c4943714634b9fb69453ae0a5a20d7250521aaf41b59cc2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelGPT-4oQwen2-VL-72B-InstructClaude-3.5-sonnet
Geomverse-109CoT11.15.614.4
VisualSketchpad8.96.716.7
VisualSketchpad + Equation Solver13.311.117.8
VisuoThink w/o rollout search (ours)24.419.026.7
VisuoThink (ours)28.925.627.8
Geometry3K (Lu et al., 2021)CoT20.818.837.5
VisualSketchpad22.917.039.6
VisualSketchpad + Equation Solver25.014.941.7
VisuoThink w/o rollout search (ours)27.120.837.5
VisuoThink (ours)33.325.043.8
", + "bbox": [ + 157, + 80, + 840, + 233 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/7ec840b1973798c457f4be3a21bf34893e11feae173b9b3dfa750a44bdc86c48.jpg", + "table_caption": [ + "Table 1: The 1-shot benchmark results (Accuracy@1) on Geometry including Geomverse-109 and Geometry3k of SOTA large visual language models. For GPT-4o and Claude-3.5-sonnet, we employ newest cutoffs (gpt-4o-2024-11-20 and claude-3-5-sonnet-20241022) separately. The gray part indicates results from VisuoThink and bold results represent the best performance." + ], + "table_footnote": [], + "table_body": "
ModelDatasetVisual NavigationVisual Tiling
Subset (Num. Samples)level-3 (16)level-4 (31)level-5 (62)level-2 (119)
GPT-4oCoT18.83.20.00.8
VoT25.00.00.01.7
VoT + Executor62.59.74.812.6
VisuoThink w/o rollout search (ours)81.232.311.319.3
VisuoThink (ours)93.861.319.451.2
Qwen2-VL-72B-InstructCoT6.73.2-0.0
VoT0.00.0-0.8
VoT + Executor25.03.2-6.7
VisuoThink w/o rollout search (ours)50.06.5-9.2
VisuoThink (ours)81.312.9-20.2
Claude-3.5-sonnetCoT37.53.20.00.8
VoT56.30.00.02.5
VoT + Executor68.822.616.110.1
VisuoThink w/o rollout search (ours)81.238.741.980.7
VisuoThink (ours)93.861.353.284.0
", + "bbox": [ + 137, + 313, + 858, + 542 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2: The Pass@1 performance comparison on spatial reasoning benchmarks including Visual Navigation and Visual Tiling across SOTA LVLMs. The gray part indicates results from VisuoThink and bold results represent the best performance. The results of Qwen2-VL-72B-Instruct on Visual Navigation ( $k = 5$ ) are masked out due to its restrained performance on the subset. The results from VoT with Executor are also reported, where the models utilize the unreliable visual hints generated by themselves rather than executor, consistent with the VoT framework.", + "bbox": [ + 112, + 552, + 882, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "integration, like VPD (Zhao et al., 2023), and VisualStechpad (Hu et al., 2024), phase II employs Python code execution for precise computation to mitigate LVLM hallucination risks. Furthermore, the model constructs single-variable algebraic equations based on identified geometric relationships, subsequently invoking equation solvers for numerical resolution.", + "bbox": [ + 112, + 640, + 489, + 769 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Empirical Results", + "text_level": 1, + "bbox": [ + 112, + 784, + 302, + 800 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Setup We conduct comprehensive evaluations on the challenging Geometry3K and Geomverse-109 datasets to demonstrate the methodological superiority. Especially we detail the trajectory of Geomverse-109 dataset synthesis in appendix E. SOTA closed-source models including gpt-4o2024-11-20 and claude-3-5-sonnet-20241022 are", + "bbox": [ + 112, + 808, + 489, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "leveraged for inference. 
To ensure architectural diversity, open-source model (e.g., Qwen2-VL-72B) were incorporated; however, smaller-parameter open-source variants were excluded due to their capability constraints. And we detail the model and algorithm hyperparameters in appendix D.", + "bbox": [ + 507, + 640, + 884, + 740 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Analysis Our empirical results reveal that, even without rollout search augmentation, our strategy substantially enhances LVLM reasoning capabilities compared to Chain-of-Thought (CoT) (MiTRA et al., 2023) and Visual Sketchpad (Hu et al., 2024) baselines. Notably, on the Geomverse-109 (Kazemi et al., 2023) benchmark, VisuoThink outperforms CoT and Visual Sketchpad by an average of $17.1\\%$ and $16.7\\%$ across all evaluated models, and predictive rollout search further", + "bbox": [ + 507, + 760, + 885, + 921 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg", + "image_caption": [ + "Figure 3: The illustration of spatial reasoning tasks derived from VoT (Wu et al., 2024), including Visual Navigation and Visual Tiling. LVLM is required to execute a sequence of actions to complete certain goals. Our experimental setting makes them much more challenging and closer to real-environment deployment." + ], + "image_footnote": [], + "bbox": [ + 115, + 83, + 878, + 293 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "enhances models' performance by an average of $4.1\\%$ . Also, the employment of equation solver on Visual Sketchpad also increases an average performance of $3.3\\%$ . This performance gap likely stems from Geomverse's emphasis on geometric relationship construction, where our equation-solving framework helps to accurately get intermediate answers and enables efficient resolution of structurally complex problems. 
The systematic integration of geometric analysis tools further mitigates error propagation inherent in conventional LVLM reasoning baselines.", + "bbox": [ + 112, + 366, + 489, + 558 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Spatial Reasoning with VisuoThink", + "text_level": 1, + "bbox": [ + 112, + 574, + 453, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Spatial reasoning, defined as the cognitive capability to interpret spatial object relationships, motion dynamics, and environmental interactions, constitutes a foundational requirement for mission-critical applications such as robotic systems, autonomous navigation, and augmented reality. These domains demand robust integration of visual perception and precise manipulation of spatial-temporal constraints for optimal action planning.", + "bbox": [ + 112, + 602, + 489, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Task Formulation Building upon the Visualization of Thought (VoT) (Wu et al., 2024) benchmarks, we design two challenging spatial reasoning benchmarks with enhanced complexity as shown in figure 3: Visual Navigation and Visual Tiling. We provide detailed materials of the differences between the original VoT benchmark setup and our experimental configuration in Appendix B and additionally provide the mathematical task formu", + "bbox": [ + 112, + 776, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "lation in appendix C.", + "bbox": [ + 507, + 366, + 667, + 381 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Visual Construction via Executor During task execution, robots deployed in true environments typically receive environmental feedback following each action, which facilitates perception and subsequent decision-making processes. In our methodology, we leverage environmental interaction tools to enhance the model's spatial reasoning capabilities. 
In each action, we employ an executor to implement the corresponding action, and return textual execution feedback and visuospatial hint (optional) representing the map state. In the context of (1) Visual Navigation, the visual feedback corresponds to the map including agent's current position; while in (2) Visual Tiling scenarios, it represents the current state of rectangle occupation patterns.", + "bbox": [ + 507, + 390, + 884, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Empirical Results", + "text_level": 1, + "bbox": [ + 507, + 643, + 695, + 658 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setup We evaluate our framework on two spatial reasoning benchmarks: Visual Navigation and Visual Tiling. For Visual Navigation, we create three difficulty levels with increasing map complexity, where the level indicates the $k$ for Visual Navigation as shown in table 2. For Visual Tiling, we focus on level-2 (i.e. $k = 2$ ) problems with 119 samples. We compare our method against Chain-of-Thought (CoT), Visualization of Thought (VoT) (Wu et al., 2024). As table 2 indicates, the results from VoT with tool interactions (i.e. Executor) are also reported, where textual feedbacks are employed but the visual hints are still generated by the model rather from executor, consistent with the VoT framework. The source of visual hints distinguishes it from our method. We employ the same", + "bbox": [ + 507, + 663, + 884, + 921 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg", + "image_caption": [ + "Figure 4: (LEFT) The trend of Pass@1 rate on Visual Navigation as the number of reasoning steps increases. (RIGHT) The relationship between the Accuracy@1 on geometry problems (Geomverse) and tree width for rollout search. 
We observe that LVLMs significantly benefit from longer reasoning chains, although the effect plateaus rapidly beyond a certain threshold of reasoning steps. The relationship between performance and tree width exhibits a more complex pattern, demonstrating an inverted U-shaped trend with both GPT-4o and Claude-3.5-Sonnet." + ], + "image_footnote": [], + "bbox": [ + 149, + 84, + 495, + 272 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 497, + 83, + 843, + 272 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "temperature and VisuoThink hyperparameters as section 4.1.", + "bbox": [ + 112, + 390, + 487, + 420 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysis In spatial reasoning experiments, VisuoThink demonstrates significant performance improvements over baseline methods, particularly when augmented with predictive rollout search. As shown in Table 2, VisuoThink achieves the highest accuracy across all tasks, outperforming both CoT and VoT baselines. For instance, on the Visual Navigation task, VisuoThink on GPT-4o achieves a $93.8\\%$ accuracy at level-3, compared to $62.5\\%$ for VoT with an executor and $18.8\\%$ for CoT. This trend is consistent across different model architectures, including GPT-4o, Qwen2-VL-72B-Instruct, and Claude-3.5-sonnet, highlighting the robustness of our approach.", + "bbox": [ + 112, + 437, + 489, + 661 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Similar to the geometry experiments in Section 4, the integration of tool interactions and multi-step visual reasoning plays a critical role in enhancing performance. The executor's feedback mechanism, which provides visual updates after each action, mirrors the incremental visual refinement seen in geometry tasks, where auxiliary lines are progressively constructed. 
For instance, VisuoThink without rollout search demonstrates an average improvement of $34.7\\%$ on Visual Tiling across diverse models. We observe that while VoT augmented with textual feedback achieves an average increase of $8.1\\%$ , its performance gain is notably less pronounced compared to VisuoThink without rollout search. This underscores the critical role of reliable visual cues in enhancing reasoning capa", + "bbox": [ + 112, + 663, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "bilities. The dynamic interaction allows the model to iteratively refine its reasoning path, leading to more accurate solutions.", + "bbox": [ + 507, + 390, + 882, + 437 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6 Discussion", + "text_level": 1, + "bbox": [ + 507, + 453, + 636, + 468 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we analyze key aspects of VisuoThink's performance. We examine how the length of reasoning chain affects spatial reasoning, the impact of child node expansion in rollout search, and the influence of supervision levels in predictive rollouts across tasks. These insights highlight VisuoThink's effectiveness and suggest future directions for multimodal reasoning frameworks.", + "bbox": [ + 507, + 481, + 884, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.1 Could Longer Reasoning Chains Assist LVLMs in Reasoning?", + "text_level": 1, + "bbox": [ + 507, + 624, + 863, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In practical applications of $LVLMs$ for spatial reasoning tasks, each tool invocation can be seen as an agent attempting an action in the environment and receiving feedback. Although many attempts may be inaccurate, allowing the model more trial-and-error opportunities before achieving the final goal could potentially enhance its reasoning capabilities. 
By setting different upper limits on the number of reasoning steps in visual navigation tasks, we observe a positive correlation between the number of reasoning steps and the model's task completion rate. This suggests that the model indeed benefits from more tool invocations and longer reasoning.", + "bbox": [ + 505, + 662, + 884, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "However, as the number of reasoning steps increases, the completion rate gradually converges,", + "bbox": [ + 507, + 889, + 882, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "making further significant improvements challenging. As shown in figure 4 (left), for instance, increasing reasoning steps from 10 to 20 resulted in substantial performance gains $(+54.1\\%$ and $+48.4\\%)$ across different LVLM architectures (GPT-4o and Claude-3.5-sonnet). However, when reasoning steps were increased from 20 to 40, the performance growth slowed dramatically, dropping to $+6.5\\%$ and $+2.1\\%$ , respectively. This phenomenon aligns with expectations, as merely increasing the number of tool invocations does not enable the model to better solve the most challenging samples. This underscores the necessity of techniques like rollout search within the broader context of test scaling.", + "bbox": [ + 112, + 84, + 492, + 326 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.2 Could Larger Tree Span Enhances VisuoThink's Performance?", + "text_level": 1, + "bbox": [ + 112, + 370, + 433, + 401 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Predictive rollouts enhance the model's reasoning capabilities, which can be viewed as a tangible outcome of successfully expanding the model's reasoning search space. 
A natural question arises: Can we further improve the model's reasoning performance on benchmarks simply by increasing the number of candidate child nodes at each selection step, i.e., expanding the tree width, thereby enhancing model's reasoning capability? To investigate this, we conducted comparative experiments on geometry tasks using GPT-4o and Claude-3.5-sonnet, keeping the depth of the reasoning tree constant while varying the number of candidate child nodes.", + "bbox": [ + 112, + 428, + 489, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As presented in figure 4 (right), we observed an inverted U-shaped trend in overall performance as the number of candidate tree nodes increased across different model architectures. Notably, when the number of candidate child nodes equals 1, the model follows a single reasoning path, effectively bypassing predictive rollout search. Contrary to expectations, the performance trend initially rises and then declines. This counterintuitive result can be attributed to the inherent errors in the model's evaluation of child nodes. Simply and aggressively increasing the tree width leads to confusion in selecting child nodes, which in turn reduces overall reasoning efficiency. Thus, an interesting conclusion emerges: we cannot expect to continuously improve model performance by merely increasing the number of child nodes in rollout search.", + "bbox": [ + 112, + 646, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg", + "image_caption": [ + "Figure 5: The performance gain $(+ \\%)$ on tasks through predictive rollout search. The performance gain is calculated via the performance gap between VisuoThink (w/o rollout search) and VisuoThink." + ], + "image_footnote": [], + "bbox": [ + 522, + 84, + 858, + 269 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.3 Strong v.s. 
Weak Supervision in Predictive Rollout Search", + "text_level": 1, + "bbox": [ + 507, + 362, + 882, + 393 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "An intriguing observation is that the strength of guidance provided by predictive rollout results varies between geometry and spatial reasoning tasks. In geometry tasks, the model only receives the final numerical results of the problem, whereas in spatial reasoning tasks, the model has access to visual states of stronger supervision (e.g., the agent's final position, the position of the destination, etc.). In other words, predictive rollouts in geometry tasks offer weaker supervision, while those in spatial reasoning tasks provide stronger supervision.", + "bbox": [ + 505, + 399, + 882, + 592 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This observation aligns with the findings of the Deepseek R1 report, which highlights that outcome-based supervision in RL can significantly enhance Deepseek-R1-Zero's reasoning capabilities (DeepSeek-AI, 2025). The effectiveness of such supervision stems from its strong supervisory signal, and predictive rollouts with strong supervision are more effective in improving model reasoning performance. This is further supported by our experimental results, as illustrated in figure 5, where predictive rollouts demonstrated more substantial performance gains in spatial reasoning tasks compared to geometry tasks, across both open-source and closed-source models. 
The detailed performance gain results are presented in appendix A.", + "bbox": [ + 507, + 593, + 884, + 851 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 509, + 863, + 640, + 878 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present VisuoThink, a multimodal tree search framework enhancing LVLM reasoning through", + "bbox": [ + 507, + 889, + 882, + 922 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "dynamic visual-textual interleaving and predictive rollout search. Our approach demonstrates significant improvements across geometry and spatial reasoning tasks without requiring model fine-tuning. Empirical results show substantial performance gains on geometry and spatial reasoning benchmarks. Our analysis reveals key insights about tool interaction benefits, search space optimization, and supervision strength in multimodal reasoning. These findings open new possibilities for advancing LVLM capabilities in complex reasoning tasks.", + "bbox": [ + 112, + 84, + 492, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 114, + 273, + 220, + 288 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Despite its strong performance, VisuoThink has several limitations. First, the predictive rollout search process introduces significant computational overhead, making it potentially impractical for real-time applications. Second, our approach particularly relies on tool interactions for stronger capability, which may require more effort in some specific deployment environments. Third, the framework's effectiveness is constrained by the quality of the base VLM's reasoning capabilities - while it enhances performance, it cannot overcome fundamental model limitations. 
Finally, our evaluation focuses primarily on geometric and spatial reasoning tasks.", + "bbox": [ + 112, + 298, + 489, + 524 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ethics and Reproducibility Statements", + "text_level": 1, + "bbox": [ + 114, + 535, + 452, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ethics We take ethical considerations very seriously and strictly adhere to the ACL Ethics Policy. This paper proposes a test-time slow-thinking framework to improve the multimodal reasoning ability of current LVLMs. All evaluation datasets used in this paper will be publicly available or have been widely adopted by researchers. Thus, we believe that this research will not pose ethical issues.", + "bbox": [ + 112, + 560, + 489, + 690 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Reproducibility In this paper, we discuss the detailed experimental setup, such as hyperparameters, implementation of algorithm, and statistic descriptions. More importantly, we will open source our code and data in the future to help reproduce the experimental results of this paper.", + "bbox": [ + 112, + 697, + 489, + 810 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 510, + 83, + 608, + 98 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Afra Amini, Tim Vieira, and Ryan Cotterell. 2024. Variational best-of-n alignment. *ArXiv*, abs/2407.06057.", + "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 2024. Alphamath almost zero: process supervision without process. ArXiv, abs/2405.03553.", + "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Joanna Matthiesen, Kevin Smith, and Joshua B Tenenbaum. 2024. Evaluating large vision-and-language models on children's mathematical olympiads. arXiv preprint arXiv:2406.15736.", + "DeepSeek-AI. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 
Preprint, arXiv:2501.12948.", + "Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Jiahui Wen. 2025. Virgo: A preliminary exploration on reproducing o1-like mllm.", + "Xidong Feng, Ziyu Wan, Muning Wen, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. ArXiv, abs/2309.17179.", + "Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, and Rongrong Ji. 2024. Cantor: Inspiring multimodal chain-of-thought of mllm. ArXiv, abs/2404.16033.", + "Lin Gui, Cristina Garbacea, and Victor Veitch. 2024. Bonbon alignment for large language models and the sweetness of best-of-n sampling. ArXiv, abs/2406.00832.", + "Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke S. Zettlemoyer, Noah A. Smith, and Ranjay Krishna. 2024. Visual sketchpad: Sketching as a visual chain of thought for multimodal language models. ArXiv, abs/2406.09403.", + "Daniel Kahneman. 2011. Thinking, fast and slow. Farrar, Straus and Giroux.", + "Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. 2023. Geomverse: A systematic evaluation of large models for geometric reasoning. Preprint, arXiv:2312.12241.", + "Xuanyu Lei, Zonghan Yang, Xinrui Chen, Peng Li, and Yang Liu. 2024. Scaffolding coordinates to promote vision-language coordination in large multi-modal models. In International Conference on Computational Linguistics.", + "Chengzu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vuli'c, and Furu Wei. 2025. Imagine while reasoning in space: Multimodal visualization-of-thought." + ], + "bbox": [ + 510, + 108, + 884, + 921 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. 2023. 
Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding.", + "Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. 2021. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. Preprint, arXiv:2105.04165.", + "Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. 2023. Compositional chain-of-thought prompting for large multimodal models. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431.", + "Debjyoti Mondal, Suraj Modi, Subhadarshi Panda, Ritraj Singh, and Godawari Sudhakar Rao. 2024. Kamcot: Knowledge augmented multimodal chain-of-thoughts reasoning. In AAAI Conference on Artificial Intelligence.", + "OpenAI. 2024a. Gpt-4o system card. Preprint, arXiv:2410.21276.", + "OpenAI. 2024b. Learning to reason with llms.", + "Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, and 1 others. 2024. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284.", + "Santhosh Kumar Ramakrishnan, Erik Wijmans, Philipp Kraehenbuehl, and Vladlen Koltun. 2024. Does spatial cognition emerge in frontier models? Preprint, arXiv:2410.06468.", + "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Preprint, arXiv:2303.11366.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. ArXiv, abs/2408.03314.", + "Hanshi Sun, Momin Haider, Ruiqi Zhang, Huitao Yang, Jiahao Qiu, Ming Yin, Mengdi Wang, Peter Bartlett, and Andrea Zanette. 2024. Fast best-of-n decoding via speculative rejection. ArXiv, abs/2410.20290.", + "Gemini Team. 2024. 
Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530.", + "Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, Hisham Cholakkal, Ivan Laptev, Mubarak Shah, Fahad Shahbaz Khan, and Salman H. Khan. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms." + ], + "bbox": [ + 115, + 85, + 485, + 920 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. 2024. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models.", + "Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, Lanqing Hong, Hang Xu, and Xiaodan Liang. 2024. Atomthink: A slow thinking framework for multimodal mathematical reasoning. Preprint, arXiv:2411.11930.", + "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, MingSung Kan, Junxian He, and Qizhe Xie. 2023. Self-evaluation guided beam search for reasoning. In Neural Information Processing Systems.", + "Guowei Xu, Peng Jin, Hao Li, Yibing Song, Lichao Sun, and Li Yuan. 2024. Llava-cot: Let vision language models reason step-by-step. ArXiv, abs/2411.10440.", + "Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. ArXiv, abs/2303.11381.", + "Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, and Dacheng Tao. 2024. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. ArXiv, abs/2412.18319.", + "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. 
React: Synergizing reasoning and acting in language models. Preprint, arXiv:2210.03629.", + "Fei Yu, Anningzhe Gao, and Benyou Wang. 2023. Ovm, outcome-supervised value models for planning in mathematical reasoning. In NAACL-HLT.", + "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. Preprint, arXiv:2412.14135.", + "Zhuosheng Zhang, Aston Zhang, Mu Li, Hai Zhao, George Karypis, and Alexander J. Smola. 2023. Multimodal chain-of-thought reasoning in language models. Trans. Mach. Learn. Res., 2024.", + "Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. 2023. Unleashing text-to-image diffusion models for visual perception. Preprint, arXiv:2303.02153.", + "Qiji Zhou, Ruochen Zhou, Nike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. 2024. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. ArXiv, abs/2405.13872." + ], + "bbox": [ + 512, + 85, + 882, + 904 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "A Performance Gain of VisuoThink Through Predictive Rollout Search", + "text_level": 1, + "bbox": [ + 114, + 83, + 453, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This appendix quantifies the performance improvements achieved by integrating predictive rollout search into the VisuoThink framework across geometry and spatial reasoning tasks. 
The performance gain through predictive rollout search is derived by subtracting the performance of VisuoThink (w/o rollout search) from those of the VisuoThink on models.", + "bbox": [ + 112, + 149, + 489, + 277 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Table 3, tasks with strong supervision (e.g., Visual Navigation and Visual Tiling) exhibit significantly higher gains compared to weak supervision tasks (e.g., Geometry $3K$ and Geomverse-109). For instance, under strong supervision, Claude-3.5-Sonnet achieves a $+25.1\\%$ improvement in Visual Navigation, while GPT-4o attains $+16.6\\%$ in Visual Tiling. In contrast, weak supervision tasks like Geomverse-109 only show modest gains (e.g., $+5.4\\%$ for GPT-4o).", + "bbox": [ + 112, + 300, + 489, + 463 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B OKSpatial Reasoning Task Setting", + "text_level": 1, + "bbox": [ + 112, + 511, + 452, + 529 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Our formulation extends beyond VoT's basic requirements by mandating LVLMs to generate comprehensive operational specifications - for instance, requiring explicit output of both movement directions and precise step counts at each decision node. This advancement creates more realistic and functionally grounded spatial reasoning evaluations (e.g., robotic navigation emulation in real world).", + "bbox": [ + 112, + 560, + 489, + 690 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This appendix details the task formulation differences between VisuoThink and baseline methods (Table 4 and Table 5). For Visual Navigation, VisuoThink requires fine-grained, executable and explicit specification of both direction and step count in action sequences, whereas VoT focuses solely on direction navigation. This formulation mirrors real-world robotic navigation, where precise movement planning is critical. 
Similarly, in Visual Tiling, VisuoThink mandates detailed actions, including polyomino variant types, block positions, and action types (e.g., \"fit\" or \"remove\"), while VoT simplifies the task by omitting variant specifications.", + "bbox": [ + 112, + 712, + 490, + 921 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C Task Formulation of Spatial Reasoning Tasks", + "text_level": 1, + "bbox": [ + 509, + 83, + 882, + 115 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Building upon VoT (Wu et al., 2024) framework, our challenging benchmarks comprise:", + "bbox": [ + 507, + 124, + 882, + 156 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "- Visual Navigation evaluates LVLMs in a simulated 2D grid environment, where agents must navigate from initial position $\\mathbf{s}_0$ to destination $\\mathbf{s}_k$ through obstacle-laden paths. The formal problem is defined by grid map $\\mathbf{M}$ containing $k$ interconnected edges $\\mathbf{E} = \\{\\mathbf{e}(\\mathbf{s}_0,\\mathbf{s}_1),\\mathbf{e}(\\mathbf{s}_1,\\mathbf{s}_2),\\ldots ,\\mathbf{e}(\\mathbf{s}_{k - 1},\\mathbf{s}_k)\\}$ . 
The LVLM should generate a sequence of executable actions in json format $\\mathbf{A} = \\{(\\mathbf{d}_0,\\mathbf{l}_0),(\\mathbf{d}_1,\\mathbf{l}_1),\\ldots ,(\\mathbf{d}_{|\\mathbf{A}| - 1},\\mathbf{l}_{|\\mathbf{A}| - 1})\\}$ , where each tuple specifies movement direction $\\mathbf{d}_i$ and exact step count $\\mathbf{l}_i$ , governed by the policy:", + "bbox": [ + 531, + 165, + 884, + 375 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {a} _ {\\mathbf {t}} \\sim \\mathcal {P} \\left(\\mathbf {d} _ {t}, \\mathbf {l} _ {t} \\mid \\mathbf {A} _ {t - 1}, \\mathbf {M}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 613, + 384, + 882, + 401 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "- Visual Tiling is a classic geometric reasoning challenge, this task assesses polyomino composition capabilities within confined rectangular regions $\\mathbf{R}$ masked by $k$ distinct polyominoes $\\mathbf{MP} = \\{\\mathbf{mp}_1,\\dots ,\\mathbf{mp}_k\\}$ . The LVLM must output action sequences $\\mathbf{a}_t = (\\mathbf{p}_t,\\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|B|}\\},\\mathbf{a}\\mathbf{t}_t)$ , where $\\mathbf{p}_t$ and $\\mathbf{B} = \\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|\\mathbf{B}|}\\}$ respectively indicate the selected polyomino type and the coordinates of the placement blocks. 
$\\mathbf{at}_t\\in \\{\\text{fit, remove}\\}$ indicates the action type modifying rectangular state $\\mathbf{R}_t$ , thus formalized as:", + "bbox": [ + 531, + 414, + 885, + 608 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {a} _ {t} \\sim \\mathcal {P} \\left(\\mathbf {p} _ {t}, \\mathbf {B}, \\mathbf {a t} _ {t} \\mid \\mathbf {R} _ {t - 1}, \\mathbf {M P}, \\mathbf {A} _ {t - 1} \\}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 563, + 637, + 882, + 670 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Though the required actions are polyomino variant-aware as shown in table 5. As the polyomino variant type is implicitly expressed in the block positions, LVLM does not need to explicitly output it in actions anymore.", + "bbox": [ + 542, + 675, + 882, + 756 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/91b03a9f72e061c8c659c7721beb2d061621a2d5b30b55e226084a1e7228568a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Supervision TypePerformance GainGPT-4oQwen2-VL-72BClaude-3.5-Sonnet
Strong SupervisionΔ Visual Navigation (%)+16.6+18.9+15.5
Δ Visual Tiling (%)+31.9+11.0+3.3
Δ Average (%)+24.3+15.0+9.4
Weak SupervisionΔ Geometry3K (%)+4.5+6.6+1.1
Δ Geomverse-109 (%)+6.2+4.2+6.3
Δ Average (%)+5.4+5.4+3.7
", + "bbox": [ + 196, + 80, + 801, + 181 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/fffc5b6abb65b39d44f783e6567ff333b981cb2198deaf81d309e62f10b77cef.jpg", + "table_caption": [ + "Table 3: Detailed performance gain of VisuoThink through predictive rollout search on benchmarks from Geometry and Spatial Reasoning over variable LVLM models." + ], + "table_footnote": [], + "table_body": "
MethodDirectionStepsTarget
Visual NavigationVoT×Navigate from the starting position
VisuoThinkto the destination.
", + "bbox": [ + 196, + 231, + 801, + 282 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/4c3f7c05a73cf0d6a4150e4bdd1954214451acc2c175ad7f630e3d5fdb677bbd.jpg", + "table_caption": [ + "Table 4: Visual Navigation task setting differences between VoT and VisuoThink." + ], + "table_footnote": [], + "table_body": "
MethodActionTarget
Polyomino TypeVariant TypeBlock PositionsAction Type
Visual TilingVoTTo identify the correct variant for a polyomino in one action.
VisuoThinkTo fill the rectangle with feasible polyomino variants.
", + "bbox": [ + 119, + 319, + 878, + 399 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 5: Visual Tiling task setting differences between VoT and VisuoThink.", + "bbox": [ + 235, + 407, + 757, + 423 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "D Model and VisuoThink Hyperparameters", + "text_level": 1, + "bbox": [ + 112, + 447, + 349, + 481 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We detail the model and VisuoThink Hyperparameters:", + "bbox": [ + 112, + 491, + 489, + 521 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Model Hyperparameters To ensure experimental fairness, we uniformly constrained the number of reasoning steps (i.e., $\\tau$ , the depth of the reasoning tree) to 10 across all experiments. During predictive rollout search, we set the number of sampled child nodes to 3, and we discuss its impact in section 6.2.", + "bbox": [ + 112, + 533, + 489, + 645 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "VisuoThink Hyperparameters While VisuoThink employed a temperature of 0.8 when sampling child nodes, all other model invocations, including the baselines (e.g. CoT, VoT, VisualSketchpad, VisuoThink w/o rollout search), were conducted with temperature set to $O$ for frontier performance. During the voting phase, we similarly maintained a temperature of $O$ and implemented single-vote sampling, which not only reduced computational overhead in terms of model calls but also achieved comparable performance.", + "bbox": [ + 112, + 656, + 489, + 833 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "E Geomverse-109 Problem Generation Trajectory", + "text_level": 1, + "bbox": [ + 112, + 845, + 467, + 879 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We establish a pipeline translating textual problems into problems with matplotlib-executable code. 
Be", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "yond the Geometry3K (Lu et al., 2021) dataset (48 problems) utilized in Sketchpad, we incorporate the D2 subset of Geomverse (Kazemi et al., 2023) to construct an slightly bigger dataset Geomverse-109 (90 problems). The original Geomverse dataset crucially includes annotated point coordinates essential for systematic problem synthesis. During the data synthesis phase, we first randomly choose 109 problems, then LVLMs generate corresponding high-quality Python code through LLM self-reflection (Shinn et al., 2023), then we filter out problems with poor diagram quality.", + "bbox": [ + 507, + 448, + 884, + 643 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_model.json b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_model.json new file mode 100644 index 0000000000000000000000000000000000000000..0525190e6a9f5e82603a660574e4cad3c9d9a20a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_model.json @@ -0,0 +1,1951 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.09, + 0.883, + 0.112 + ], + "angle": 0, + "content": "VisuoThink: Empowering LVLM Reasoning with Multimodal Tree Search" + }, + { + "type": "text", + "bbox": [ + 0.161, + 0.131, + 0.838, + 0.166 + ], + "angle": 0, + "content": "Yikun Wang\\(^{12}\\), Siyin Wang\\(^{12}\\), Qinyuan Cheng\\(^{1}\\), Zhaoye Fei\\(^{1}\\), Liang Ding\\(^{3}\\), Qipeng Guo\\(^{24}\\), Dacheng Tao\\(^{5}\\), Xipeng Qiu\\(^{\\dagger 12}\\)" + }, + { + "type": "text", + "bbox": [ + 0.293, + 0.172, + 0.704, + 0.189 + ], + "angle": 0, + "content": "\\(^{1}\\) Fudan University \\(^{2}\\) Shanghai Innovation Institute" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.189, + 0.868, + 0.222 + ], + "angle": 0, + "content": "3 The University of Sydney 4 Shanghai AI Laboratory 5 
Nanyang Technological University yikunwang19@fudan.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.293, + 0.461, + 0.662 + ], + "angle": 0, + "content": "Recent advancements in Large Vision-Language Models have showcased remarkable capabilities. However, they often falter when confronted with complex reasoning tasks that humans typically address through visual aids and deliberate, step-by-step thinking. While existing methods have explored text-based slow thinking or rudimentary visual assistance, they fall short of capturing the intricate, interleaved nature of human visual-verbal reasoning processes. To overcome these limitations and inspired by the mechanisms of slow thinking in human cognition, we introduce VisuoThink, a novel framework that seamlessly integrates visuospatial and linguistic domains. VisuoThink facilitates multimodal slow thinking by enabling progressive visual-textual reasoning and incorporates test-time scaling through look-ahead tree search. Extensive experiments demonstrate that VisuoThink significantly enhances reasoning capabilities via inference-time scaling, even without fine-tuning, achieving state-of-the-art performance in tasks involving geometry and spatial reasoning. Our code has been open-sourced at https://github.com/ekonwang/VisuoThink." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.679, + 0.26, + 0.695 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.707, + 0.49, + 0.884 + ], + "angle": 0, + "content": "Recent advances in Large Vision-Language Models (LVLMs) (OpenAI, 2024a; Team, 2024) have shown remarkable progress across a variety of tasks. 
However, these models often struggle with complex reasoning challenges, such as geometric problem-solving (Qiao et al., 2024; Cherian et al., 2024) or spatial reasoning (Ramakrishnan et al., 2024; Wu et al., 2024), where human problem-solving approaches typically rely on visual aids. For example, when solving geometry problems, humans often iteratively sketch auxiliary lines or" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.263, + 0.871, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.312, + 0.871, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.372, + 0.871, + 0.506 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.519, + 0.885, + 0.633 + ], + "angle": 0, + "content": "Figure 1: Illustration of Input-Output Prompting, CoT, Vision-aided Thought and our VisuoThink. Vision-aided Thought often relies on reasoning with one-step or unreliable multi-step visual cues (generated by LVLMs). While VisuoThink addresses this gap through tool-augmented visual hints, coupled with a predictive-rollout search mechanism to systematically optimize reasoning capability." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.646, + 0.884, + 0.71 + ], + "angle": 0, + "content": "visualize intermediate steps, while exploring different reasoning paths - a form of \"slow thinking\" (Kahneman, 2011) that combines visual and verbal cognitive processes." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.712, + 0.884, + 0.922 + ], + "angle": 0, + "content": "With the success of o1 series models (OpenAI, 2024b), researchers have explored language as a medium for implementing slow thinking, coupled with test-time scaling techniques (Zeng et al., 2024). 
Given the inherently multimodal nature of reality, early efforts (Xu et al., 2024; Thawakar et al., 2025; Yao et al., 2024; Du et al., 2025) have attempted to extend such deliberative thinking to multimodal reasoning. However, even augmented with search strategy, these methods treat visual information merely as static input, relying solely on textual reasoning chains during the reasoning process - creating a \"visual blind spot\", where the" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.309, + 0.061, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.09130v1 [cs.CL] 12 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.894, + 0.365, + 0.908 + ], + "angle": 0, + "content": "\\*Yikun and Siyin contributed equally" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.908, + 0.283, + 0.921 + ], + "angle": 0, + "content": "† Corresponding Author" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.277 + ], + "angle": 0, + "content": "potential for visual information throughout the reasoning process is largely ignored (Fig. 1a). On the other hand, while approaches like VisualSketchpad (Hu et al., 2024) and VoT (Wu et al., 2024) have recognized the importance of visual information by incorporating visual aids in reasoning (Fig. 1b), they mainly focus on single-step assistance or simplified visual hints (e.g., emojis). These methods lack the multi-step visual-textual interleaved reasoning process that characterizes human slow thinking, while failing to explore potential search strategies." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.279, + 0.49, + 0.6 + ], + "angle": 0, + "content": "To address these limitations, we propose VisuoThink, a multimodal tree search framework that systematically explores multiple reasoning paths with vision-text interleaved thinking at each step. Unlike previous approaches, Visuothink (Fig. 
1c) enables multimodal slow thinking through two key innovations: (1) a step-by-step vision-text interleaved reasoning framework that dynamically utilizes multi-step visual aids from tool uses, and (2) a look-ahead tree search algorithm that explores multiple reasoning paths, enabling test-time scaling of the reasoning process. Specifically, our look-ahead tree search incorporates a predictive rollout mechanism that simulates the likely outcomes of different reasoning states. This allows the model to prioritize more promising paths and avoid less ones, guiding the reasoning process toward the optimal solution. Through this test-time scaling capability, the model can thoroughly explore and optimize reasoning paths dynamically during inference." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.601, + 0.49, + 0.793 + ], + "angle": 0, + "content": "Our empirical evaluation demonstrates that Visuothink significantly outperforms existing methods across various reasoning tasks, particularly in geometry and spatial reasoning domains. On Geomeverse, Our methods achieves an accuracy@1 as high as \\(48.5\\%\\), with an improvement of as high as \\(21.8\\%\\) over the state-of-the-art baseline, which particularly shows strong performance of VisuoThink on problems requiring multi-step visual reasoning. Through extensive ablation studies, we show that each component of our framework contributes meaningfully to its overall performance." + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.794, + 0.424, + 0.809 + ], + "angle": 0, + "content": "In summary, our contributions include:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.816, + 0.489, + 0.898 + ], + "angle": 0, + "content": "- We propose a novel reasoning paradigm, multimodal tree search, for multimodal slow thinking that enables dynamic integration of visual and verbal reasoning paths throughout the problem-solving search process." 
+ }, + { + "type": "text", + "bbox": [ + 0.137, + 0.906, + 0.49, + 0.922 + ], + "angle": 0, + "content": "- We extend test-time scaling methods to the vi" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.816, + 0.49, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.085, + 0.883, + 0.135 + ], + "angle": 0, + "content": "sual domain by proposing a predictive rollout mechanism that explores and optimizes visual reasoning paths by predicting future states." + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.147, + 0.885, + 0.228 + ], + "angle": 0, + "content": "- We demonstrate substantial empirical improvements across multiple reasoning tasks, particularly in geometry and spatial reasoning, with detailed analyses revealing key insights about our approach." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.257, + 0.666, + 0.272 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.284, + 0.822, + 0.3 + ], + "angle": 0, + "content": "2.1 Text-centric Reasoning in LVLMs" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.306, + 0.885, + 0.5 + ], + "angle": 0, + "content": "With the emergence of o1 models (OpenAI, 2024b), the importance of slow thinking has become increasingly evident (Zeng et al., 2024). Several works have attempted to extend this to LVLMs through methods like stage-wise reasoning (Xuet al., 2024), curriculum learning (Thawakar et al., 2025), tree search-based data generation (Yao et al., 2024), and LLM distillation (Du et al., 2025). However, these methods treat visual information as static input, relying only on textual data during reasoning, which limits their ability to fully leverage multimodal information for complex tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.513, + 0.742, + 0.529 + ], + "angle": 0, + "content": "2.2 Vision-aided Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.535, + 0.884, + 0.776 + ], + "angle": 0, + "content": "Recent advancements in multimodal reasoning have demonstrated that incorporating visual information provides richer context and hints compared to text-only approaches. Early studies adopted a two-stage approach, where visual information is first transformed and grounded into text (Zhang et al., 2023), graph structures (e.g., scene graphs (Mitra et al., 2023) or knowledge graphs (Mondal et al., 2024)), or bounding boxes (Lei et al., 2024), followed by reasoning. Other works leverage existing vision models (e.g., segmentation, detection) to process input images into valuable cues for perception, enabling more precise image-understanding with fine-grained visual information (Yang et al., 2023; Zhou et al., 2024; Gao et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.778, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Another sequence of research focuses on intermediate visual representations to enhance reasoning. For instance, Visual Sketchpad (Hu et al., 2024) employs Python-based drawing tools to generate sketches as intermediate visual aids for geometric problems, while VoT (Wu et al., 2024) formalizes visual thinking by generating emoji-like textual representations. 
MVOT (Li et al., 2025) fine-tunes multimodal models to generate images" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.086, + 0.878, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.289, + 0.885, + 0.348 + ], + "angle": 0, + "content": "Figure 2: The illustration of our VisuoThink framework with three stages: (1) vision-text interleaved expansion: generates candidate paths through vision-text interleaved thinking; (2) rollout simulation: sample candidate reasoning nodes and then perform look-ahead search to better evaluate the value of current states; (3) selection: selects the most promising path via self-voting with results or states from rollout." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.372, + 0.49, + 0.531 + ], + "angle": 0, + "content": "during reasoning, allowing the model to create visual aids dynamically. Despite these advancements, most existing methods rely on single-step or unreliable visual representations, lacking search mechanisms to test-time scaling through exploring multiple reasoning paths. In contrast, we develop a multimodal tree search framework that both leverages multi-step visual cues during reasoning and systematically explores reasoning paths through tree search." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.543, + 0.433, + 0.558 + ], + "angle": 0, + "content": "2.3 Test-time Scaling with Tree Search" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.564, + 0.49, + 0.854 + ], + "angle": 0, + "content": "Scaling compute at test time has emerged as a powerful strategy to enhance LLMs' reasoning capabilities without increasing model parameters (Snell et al., 2024). 
Various approaches including BoN (Gui et al., 2024; Sun et al., 2024; Amini et al., 2024), guided beam search (Xie et al., 2023; Yu et al., 2023), and Monte Carlo Tree Search (MCTS) (Feng et al., 2023; Liu et al., 2023; Chen et al., 2024) have been explored for text models, demonstrating improved performance through different search strategies. However, the exploration of test-time scaling in LVLMs remains limited. Prior work like AtomThink (Xiang et al., 2024) has only investigated basic methods such as beam search, with text-only reasoning chains. In contrast, our method introduces vision-text interleaved thinking with look-ahead search, extending test-time scaling to multimodal reasoning." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.865, + 0.251, + 0.879 + ], + "angle": 0, + "content": "3 VisuoThink" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.89, + 0.489, + 0.922 + ], + "angle": 0, + "content": "We propose VisuoThink, a novel framework for multimodal reasoning that dynamically integrates" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.372, + 0.885, + 0.452 + ], + "angle": 0, + "content": "visual and textual information during the inference process. At its core, our framework implements multimodal slow thinking through a key mechanism: predictive rollout search that allows models to think ahead." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.465, + 0.818, + 0.481 + ], + "angle": 0, + "content": "3.1 Vision-Text Interleaved Thinking" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.487, + 0.885, + 0.76 + ], + "angle": 0, + "content": "Our framework facilitates vision-text interleaved reasoning through an iterative cycle of Thought, Action, and Observation like existing work (Yao et al., 2023), which enables natural and dynamic interactions with external tools. 
(1) Thought phase: the model leverages visual information for textual reasoning (such as analyzing patterns based on previously added auxiliary lines) and determines the next step by planning what visual hints should be added to enhance understanding. (2) Action phase: the model executes the planned operations by calling external tools (like using Python code to draw auxiliary lines or highlight key features) to generate or modify visual information. (3) Observation phase: the model processes the visual feedback from the Action phase, incorporating these new visual hints into the next reasoning step." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.886, + 0.922 + ], + "angle": 0, + "content": "The importance of visual information for LVLM reasoning is highlighted in VisuoThink, which utilize tool invocations to construct reliable visual hints step by step in a visual construction process. This tool-based design allows VisuoThink to flexibly adapt to various visual reasoning tasks. Moreover, unlike approaches (e.g. VisualSketchpad) that generate all visual aids at once, our step-by-step visual guidance naturally integrates with search techniques, enabling effective test-time scaling." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.085, + 0.361, + 0.099 + ], + "angle": 0, + "content": "3.2 Predictive Rollout Search" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.106, + 0.49, + 0.298 + ], + "angle": 0, + "content": "Based on tree search methods and inspired by MCTS, we propose a predictive rollout search mechanism that interleaves visual-text thinking. By anticipating the outcomes of intermediate states, the model can make timely corrections, enabling more accurate and powerful reasoning. 
As shown in Figure 2, at each reasoning step, our framework first generates multiple candidate paths through vision-text interleaved thinking, then simulates these paths to predict their outcomes, and finally selects the most promising path through a self-voting mechanism." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.304, + 0.49, + 0.449 + ], + "angle": 0, + "content": "Vision-Text Interleaved Expansion In the whole reasoning chain \\(\\mathbf{A} = \\{\\mathbf{a}_1, \\mathbf{a}_2, \\dots, \\mathbf{a}_t\\}\\), given the current node \\(\\mathbf{a}_{t-1}\\), the model samples \\(k\\) candidate nodes \\(\\mathbf{S}_t = \\{\\mathbf{s}_t^1, \\mathbf{s}_t^2, \\dots, \\mathbf{s}_t^k\\}\\). Each candidate follows the vision-text interleaved thinking process described above, generating a sequence of Thought, Action, and Observation steps. This expansion creates a tree of possible reasoning paths, each representing a different problem-solving strategy." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.454, + 0.49, + 0.613 + ], + "angle": 0, + "content": "Rollout Simulation Visual reasoning often requires multiple steps to reach a conclusion, making it crucial to evaluate the full potential of each path. For each candidate node \\(\\mathbf{s}_t^i\\), the model simulates the complete reasoning process to predict final outcomes \\(\\mathbf{r}_t^i\\), rather than relying solely on immediate state evaluation. Different from expansion, the simulation extends each candidate node with a single path of vision-text interleaved thinking until reaching a final result." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.619, + 0.49, + 0.715 + ], + "angle": 0, + "content": "Selection The selection of the optimal path is performed through a self-voting mechanism. The model considers the task description, historical nodes, and the simulated path with predicted results for each candidate node. 
The selection process can be formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.724, + 0.488, + 0.747 + ], + "angle": 0, + "content": "\\[\n\\mathbf {S e l e c t} \\left(\\mathbf {S} _ {t}\\right) = \\underset {\\mathbf {s} _ {t} ^ {i} \\in \\mathbf {S} _ {t}} {\\arg \\max } \\mathbf {V o t e} \\left(\\mathbf {A} _ {t - 1}, \\mathbf {s} _ {t} ^ {i}, \\mathbf {r} _ {t} ^ {i}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.756, + 0.49, + 0.853 + ], + "angle": 0, + "content": "where \\(\\mathbf{A}_{t - 1}\\) represents the historical context, \\(\\mathbf{s}_t^i\\) for the candidate node, and \\(\\mathbf{r}_t^i\\) is the predicted result or final state. The Select is a heuristic function served by the LVLM model to guide the process. This selection ensures the model pursues the most promising reasoning strategy." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.864, + 0.454, + 0.881 + ], + "angle": 0, + "content": "4 Solving Geometry with VisuoThink" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "The core of our methodology is rooted in multi-step visual information processing and search-based rea" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.15 + ], + "angle": 0, + "content": "soning, enabling LVLMs to address strongly constrained mathematical problems (e.g., geometry challenges) and open-domain scenarios (such as visual navigation and visual tiling in section 5)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.151, + 0.885, + 0.327 + ], + "angle": 0, + "content": "We formalize geometry problem-solving as a two-phase process integrating visual construction and algebraic computation. In Phase I, the model generates auxiliary lines defined by geometric constraints, such as connecting points \\((x_{i},y_{i})\\) and \\((x_{j},y_{j})\\), construct a perpendicular or parallel line to form line segments \\(\\mathbf{L} = \\{l_i\\}\\). 
This phase terminates with an AUX-END token, triggering Phase II, where geometric relationships are translated into solvable equations (e.g., \\(ax + b = 0\\)) through Python code execution." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.334, + 0.885, + 0.465 + ], + "angle": 0, + "content": "Task Formulation LVLM should produce the reasoning trajectory consisting of reasoning steps \\(\\mathbf{A} = \\{\\mathbf{a}_t\\}\\) that leads to the final result \\(\\mathbf{r}\\), given the original problem \\(\\mathbf{Q}\\) while taking into account the auxiliary lines \\(\\mathbf{L}\\). The framework operates under a constraint \\(\\sum_{t=1}^{|A|} \\| \\mathbf{a}_t \\| \\leq \\tau\\), where \\(\\mathbf{a}_t\\) denotes visual-textual reasoning steps and \\(\\tau\\) is the maximum step limit:" + }, + { + "type": "equation", + "bbox": [ + 0.525, + 0.488, + 0.884, + 0.524 + ], + "angle": 0, + "content": "\\[\n\\mathbf {A} \\sim \\mathcal {P} \\left(\\left\\{\\mathbf {a} _ {1}, \\dots , \\mathbf {a} _ {| A |}, \\mathbf {r} \\right\\} \\mid \\mathbf {Q}, \\mathbf {L}\\right) \\text {s . t .} \\sum_ {t = 1} ^ {| \\mathbf {A} |} \\| \\mathbf {a} _ {t} \\| \\leq \\tau \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.538, + 0.884, + 0.602 + ], + "angle": 0, + "content": "This formulation mirrors human problem-solving by decomposing proofs into executable visual-textual steps, validated via coordinate-based tools like matplotlib and equation solver." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.609, + 0.885, + 0.787 + ], + "angle": 0, + "content": "Visual Construction We emphasize the criticality of incremental visual information for accurate solutions, where multi-step graphical representations originate from the progressive construction of auxiliary lines. This multi-stage approach facilitates search algorithm-enhanced refinement of auxiliary line generation, significantly improving LVLM capabilities in geometric reasoning. 
Consistent with Sketchpad methodology, we exclusively utilize common Python libraries (e.g., matplotlib) for diagram rendering." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.793, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Algebraic Computation Unlike general tasks, solving geometry problems cannot rely solely on visual construction or the model's inherent capabilities; instead, it necessitates the use of computational tools to achieve precise and accurate results. This requirement stems from the need for exact numerical solutions and the mitigation of potential errors in geometric reasoning. Through systematic" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.158, + 0.082, + 0.842, + 0.234 + ], + "angle": 0, + "content": "
ModelGPT-4oQwen2-VL-72B-InstructClaude-3.5-sonnet
Geomverse-109CoT11.15.614.4
VisualSketchpad8.96.716.7
VisualSketchpad + Equation Solver13.311.117.8
VisuoThink w/o rollout search (ours)24.419.026.7
VisuoThink (ours)28.925.627.8
Geometry3K(Lu et al., 2021)CoT20.818.837.5
VisualSketchPad22.917.039.6
VisualSketchpad + Equation Solver25.014.941.7
VisuoThink w/o rollout search (ours)27.120.837.5
VisuoThink (ours)33.325.043.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.244, + 0.885, + 0.304 + ], + "angle": 0, + "content": "Table 1: The 1-shot benchmark results (Accuracy@1) on Geometry including Geomverse-109 and Geometry3k of SOTA large visual language models. For GPT-4o and Claude-3.5-sonnet, we employ newest cutoffs (gpt-4o-2024-11-20 and claude-3-5-sonnet-20241022) separately. The gray part indicates results from VisuoThink and bold results represent the best performance." + }, + { + "type": "table", + "bbox": [ + 0.139, + 0.314, + 0.86, + 0.543 + ], + "angle": 0, + "content": "
ModelDatasetVisual NavigationVisual Tiling
Subset (Num. Samples)level-3 (16)level-4 (31)level-5 (62)level-2 (119)
GPT-4oCoT18.83.20.00.8
VoT25.00.00.01.7
VoT + Executor62.59.74.812.6
VisuoThink w/o rollout search (ours)81.232.311.319.3
VisuoThink (ours)93.861.319.451.2
Qwen2-VL-72B-InstructCoT6.73.2-0.0
VoT0.00.0-0.8
VoT + Executor25.03.2-6.7
VisuoThink w/o rollout search (ours)50.06.5-9.2
VisuoThink (ours)81.312.9-20.2
Claude-3.5-sonnetCoT37.53.20.00.8
VoT56.30.00.02.5
VoT + Executor68.822.616.110.1
VisuoThink w/o rollout search (ours)81.238.741.980.7
VisuoThink (ours)93.861.353.284.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.553, + 0.883, + 0.626 + ], + "angle": 0, + "content": "Table 2: The Pass@1 performance comparison on spatial reasoning benchmarks including Visual Navigation and Visual Tiling across SOTA LVLMs. The gray part indicates results from VisuoThink and bold results represent the best performance. The results of Qwen2-VL-72B-Instruct on Visual Navigation (\\(k = 5\\)) are masked out due to its restrained performance on the subset. The results from VoT with Executor are also reported, where the models utilize the unreliable visual hints generated by themselves rather than executor, consistent with the VoT framework." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.641, + 0.49, + 0.77 + ], + "angle": 0, + "content": "integration, like VPD (Zhao et al., 2023), and Visual Sketchpad (Hu et al., 2024), phase II employs Python code execution for precise computation to mitigate LVLM hallucination risks. Furthermore, the model constructs single-variable algebraic equations based on identified geometric relationships, subsequently invoking equation solvers for numerical resolution." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.785, + 0.303, + 0.801 + ], + "angle": 0, + "content": "4.1 Empirical Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Setup We conduct comprehensive evaluations on the challenging Geometry3K and Geomverse-109 datasets to demonstrate the methodological superiority. Especially we detail the trajectory of Geomverse-109 dataset synthesis in appendix E. SOTA closed-source models including gpt-4o-2024-11-20 and claude-3-5-sonnet-20241022 are" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.641, + 0.885, + 0.741 + ], + "angle": 0, + "content": "leveraged for inference. 
To ensure architectural diversity, open-source models (e.g., Qwen2-VL-72B) were incorporated; however, smaller-parameter open-source variants were excluded due to their capability constraints. And we detail the model and algorithm hyperparameters in appendix D." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.887, + 0.922 + ], + "angle": 0, + "content": "Analysis Our empirical results reveal that, even without rollout search augmentation, our strategy substantially enhances LVLM reasoning capabilities compared to Chain-of-Thought (CoT) (Mitra et al., 2023) and Visual Sketchpad (Hu et al., 2024) baselines. Notably, on the Geomverse-109 (Kazemi et al., 2023) benchmark, VisuoThink outperforms CoT and Visual Sketchpad by an average of \\(17.1\\%\\) and \\(16.7\\%\\) across all evaluated models, and predictive rollout search further" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.084, + 0.88, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.306, + 0.884, + 0.351 + ], + "angle": 0, + "content": "Figure 3: The illustration of spatial reasoning tasks derived from VoT (Wu et al., 2024), including Visual Navigation and Visual Tiling. LVLM is required to execute a sequence of actions to complete certain goals. Our experimental setting makes them much more challenging and closer to real-environment deployment." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.367, + 0.49, + 0.56 + ], + "angle": 0, + "content": "enhances models' performance by an average of \\(4.1\\%\\). Also, the employment of equation solver on Visual Sketchpad also increases an average performance of \\(3.3\\%\\). This performance gap likely stems from Geomverse's emphasis on geometric relationship construction, where our equation-solving framework helps to accurately get intermediate answers and enables efficient resolution of structurally complex problems. 
The systematic integration of geometric analysis tools further mitigates error propagation inherent in conventional LVLM reasoning baselines." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.575, + 0.454, + 0.592 + ], + "angle": 0, + "content": "5 Spatial Reasoning with VisuoThink" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.604, + 0.49, + 0.764 + ], + "angle": 0, + "content": "Spatial reasoning, defined as the cognitive capability to interpret spatial object relationships, motion dynamics, and environmental interactions, constitutes a foundational requirement for mission-critical applications such as robotic systems, autonomous navigation, and augmented reality. These domains demand robust integration of visual perception and precise manipulation of spatial-temporal constraints for optimal action planning." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.777, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Task Formulation Building upon the Visualization of Thought (VoT) (Wu et al., 2024) benchmarks, we design two challenging spatial reasoning benchmarks with enhanced complexity as shown in figure 3: Visual Navigation and Visual Tiling. We provide detailed materials of the differences between the original VoT benchmark setup and our experimental configuration in Appendix B and additionally provide the mathematical task formu" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.367, + 0.668, + 0.382 + ], + "angle": 0, + "content": "lation in appendix C." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.391, + 0.885, + 0.633 + ], + "angle": 0, + "content": "Visual Construction via Executor During task execution, robots deployed in true environments typically receive environmental feedback following each action, which facilitates perception and subsequent decision-making processes. In our methodology, we leverage environmental interaction tools to enhance the model's spatial reasoning capabilities. 
In each action, we employ an executor to implement the corresponding action, and return textual execution feedback and visuospatial hint (optional) representing the map state. In the context of (1) Visual Navigation, the visual feedback corresponds to the map including agent's current position; while in (2) Visual Tiling scenarios, it represents the current state of rectangle occupation patterns." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.644, + 0.696, + 0.659 + ], + "angle": 0, + "content": "5.1 Empirical Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.664, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Setup We evaluate our framework on two spatial reasoning benchmarks: Visual Navigation and Visual Tiling. For Visual Navigation, we create three difficulty levels with increasing map complexity, where the level indicates the \\( k \\) for Visual Navigation as shown in table 2. For Visual Tiling, we focus on level-2 (i.e. \\( k = 2 \\)) problems with 119 samples. We compare our method against Chain-of-Thought (CoT), Visualization of Thought (VoT) (Wu et al., 2024). As table 2 indicates, the results from VoT with tool interactions (i.e. Executor) are also reported, where textual feedbacks are employed but the visual hints are still generated by the model rather from executor, consistent with the VoT framework. The source of visual hints distinguishes it from our method. We employ the same" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.15, + 0.085, + 0.496, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.084, + 0.844, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.292, + 0.884, + 0.378 + ], + "angle": 0, + "content": "Figure 4: (LEFT) The trend of Pass@1 rate on Visual Navigation as the number of reasoning steps increases. 
(RIGHT) The relationship between the Accuracy@1 on geometry problems (Geomverse) and tree width for rollout search. We observe that LVLMs significantly benefit from longer reasoning chains, although the effect plateaus rapidly beyond a certain threshold of reasoning steps. The relationship between performance and tree width exhibits a more complex pattern, demonstrating an inverted U-shaped trend with both GPT-4o and Claude-3.5-Sonnet." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.391, + 0.489, + 0.422 + ], + "angle": 0, + "content": "temperature and VisuoThink hyperparameters as section 4.1." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.438, + 0.49, + 0.662 + ], + "angle": 0, + "content": "Analysis In spatial reasoning experiments, VisuoThink demonstrates significant performance improvements over baseline methods, particularly when augmented with predictive rollout search. As shown in Table 2, VisuoThink achieves the highest accuracy across all tasks, outperforming both CoT and VoT baselines. For instance, on the Visual Navigation task, VisuoThink on GPT-4o achieves a \\(93.8\\%\\) accuracy at level-3, compared to \\(62.5\\%\\) for VoT with an executor and \\(18.8\\%\\) for CoT. This trend is consistent across different model architectures, including GPT-4o, Qwen2-VL-72B-Instruct, and Claude-3.5-sonnet, highlighting the robustness of our approach." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.664, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Similar to the geometry experiments in Section 4, the integration of tool interactions and multi-step visual reasoning plays a critical role in enhancing performance. The executor's feedback mechanism, which provides visual updates after each action, mirrors the incremental visual refinement seen in geometry tasks, where auxiliary lines are progressively constructed. 
For instance, VisuoThink without rollout search demonstrates an average improvement of \\(34.7\\%\\) on Visual Tiling across diverse models. We observe that while VoT augmented with textual feedback achieves an average increase of \\(8.1\\%\\), its performance gain is notably less pronounced compared to VisuoThink without rollout search. This underscores the critical role of reliable visual cues in enhancing reasoning capa" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.391, + 0.883, + 0.438 + ], + "angle": 0, + "content": "bilities. The dynamic interaction allows the model to iteratively refine its reasoning path, leading to more accurate solutions." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.454, + 0.637, + 0.469 + ], + "angle": 0, + "content": "6 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.482, + 0.885, + 0.61 + ], + "angle": 0, + "content": "In this section, we analyze key aspects of VisuoThink's performance. We examine how the length of reasoning chain affects spatial reasoning, the impact of child node expansion in rollout search, and the influence of supervision levels in predictive rollouts across tasks. These insights highlight VisuoThink's effectiveness and suggest future directions for multimodal reasoning frameworks." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.625, + 0.864, + 0.657 + ], + "angle": 0, + "content": "6.1 Could Longer Reasoning Chains Assist LVLMs in Reasoning?" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.663, + 0.885, + 0.889 + ], + "angle": 0, + "content": "In practical applications of \\(LVLMs\\) for spatial reasoning tasks, each tool invocation can be seen as an agent attempting an action in the environment and receiving feedback. Although many attempts may be inaccurate, allowing the model more trial-and-error opportunities before achieving the final goal could potentially enhance its reasoning capabilities. 
By setting different upper limits on the number of reasoning steps in visual navigation tasks, we observe a positive correlation between the number of reasoning steps and the model's task completion rate. This suggests that the model indeed benefits from more tool invocations and longer reasoning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.89, + 0.884, + 0.922 + ], + "angle": 0, + "content": "However, as the number of reasoning steps increases, the completion rate gradually converges," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.327 + ], + "angle": 0, + "content": "making further significant improvements challenging. As shown in figure 4 (left), for instance, increasing reasoning steps from 10 to 20 resulted in substantial performance gains \\((+54.1\\%\\) and \\(+48.4\\%)\\) across different LVLM architectures (GPT-4o and Claude-3.5-sonnet). However, when reasoning steps were increased from 20 to 40, the performance growth slowed dramatically, dropping to \\(+6.5\\%\\) and \\(+2.1\\%\\), respectively. This phenomenon aligns with expectations, as merely increasing the number of tool invocations does not enable the model to better solve the most challenging samples. This underscores the necessity of techniques like rollout search within the broader context of test scaling." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.372, + 0.434, + 0.403 + ], + "angle": 0, + "content": "6.2 Could Larger Tree Span Enhances VisuoThink's Performance?" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.429, + 0.49, + 0.639 + ], + "angle": 0, + "content": "Predictive rollouts enhance the model's reasoning capabilities, which can be viewed as a tangible outcome of successfully expanding the model's reasoning search space. 
A natural question arises: Can we further improve the model's reasoning performance on benchmarks simply by increasing the number of candidate child nodes at each selection step, i.e., expanding the tree width, thereby enhancing model's reasoning capability? To investigate this, we conducted comparative experiments on geometry tasks using GPT-4o and Claude-3.5-sonnet, keeping the depth of the reasoning tree constant while varying the number of candidate child nodes." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.648, + 0.49, + 0.922 + ], + "angle": 0, + "content": "As presented in figure 4 (right), we observed an inverted U-shaped trend in overall performance as the number of candidate tree nodes increased across different model architectures. Notably, when the number of candidate child nodes equals 1, the model follows a single reasoning path, effectively bypassing predictive rollout search. Contrary to expectations, the performance trend initially rises and then declines. This counterintuitive result can be attributed to the inherent errors in the model's evaluation of child nodes. Simply and aggressively increasing the tree width leads to confusion in selecting child nodes, which in turn reduces overall reasoning efficiency. Thus, an interesting conclusion emerges: we cannot expect to continuously improve model performance by merely increasing the number of child nodes in rollout search." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.085, + 0.86, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.288, + 0.885, + 0.348 + ], + "angle": 0, + "content": "Figure 5: The performance gain \\((+ \\%)\\) on tasks through predictive rollout search. The performance gain is calculated via the performance gap between VisuoThink (w/o rollout search) and VisuoThink." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.363, + 0.883, + 0.394 + ], + "angle": 0, + "content": "6.3 Strong v.s. 
Weak Supervision in Predictive Rollout Search" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.4, + 0.884, + 0.593 + ], + "angle": 0, + "content": "An intriguing observation is that the strength of guidance provided by predictive rollout results varies between geometry and spatial reasoning tasks. In geometry tasks, the model only receives the final numerical results of the problem, whereas in spatial reasoning tasks, the model has access to visual states of stronger supervision (e.g., the agent's final position, the position of the destination, etc.). In other words, predictive rollouts in geometry tasks offer weaker supervision, while those in spatial reasoning tasks provide stronger supervision." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.594, + 0.885, + 0.852 + ], + "angle": 0, + "content": "This observation aligns with the findings of the Deepseek R1 report, which highlights that outcome-based supervision in RL can significantly enhance Deepseek-R1-Zero's reasoning capabilities (DeepSeek-AI, 2025). The effectiveness of such supervision stems from its strong supervisory signal, and predictive rollouts with strong supervision are more effective in improving model reasoning performance. This is further supported by our experimental results, as illustrated in figure 5, where predictive rollouts demonstrated more substantial performance gains in spatial reasoning tasks compared to geometry tasks, across both open-source and closed-source models. The detailed performance gain results are presented in appendix A." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.864, + 0.642, + 0.879 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.89, + 0.884, + 0.923 + ], + "angle": 0, + "content": "We present VisuoThink, a multimodal tree search framework enhancing LVLM reasoning through" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.264 + ], + "angle": 0, + "content": "dynamic visual-textual interleaving and predictive rollout search. Our approach demonstrates significant improvements across geometry and spatial reasoning tasks without requiring model fine-tuning. Empirical results show substantial performance gains on geometry and spatial reasoning benchmarks. Our analysis reveals key insights about tool interaction benefits, search space optimization, and supervision strength in multimodal reasoning. These findings open new possibilities for advancing LVLM capabilities in complex reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.274, + 0.221, + 0.289 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.299, + 0.49, + 0.525 + ], + "angle": 0, + "content": "Despite its strong performance, VisuoThink has several limitations. First, the predictive rollout search process introduces significant computational overhead, making it potentially impractical for real-time applications. Second, our approach particularly relies on tool interactions for stronger capability, which may require more effort in some specific deployment environments. Third, the framework's effectiveness is constrained by the quality of the base VLM's reasoning capabilities - while it enhances performance, it cannot overcome fundamental model limitations. Finally, our evaluation focuses primarily on geometric and spatial reasoning tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.536, + 0.453, + 0.552 + ], + "angle": 0, + "content": "Ethics and Reproducibility Statements" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.561, + 0.49, + 0.691 + ], + "angle": 0, + "content": "Ethics We take ethical considerations very seriously and strictly adhere to the ACL Ethics Policy. This paper proposes a test-time slow-thinking framework to improve the multimodal reasoning ability of current LVLMs. All evaluation datasets used in this paper will be publicly available or have been widely adopted by researchers. Thus, we believe that this research will not pose ethical issues." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.699, + 0.49, + 0.812 + ], + "angle": 0, + "content": "Reproducibility In this paper, we discuss the detailed experimental setup, such as hyperparameters, implementation of algorithm, and statistic descriptions. More importantly, we will open source our code and data in the future to help reproduce the experimental results of this paper." + }, + { + "type": "title", + "bbox": [ + 0.512, + 0.084, + 0.61, + 0.099 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.109, + 0.884, + 0.137 + ], + "angle": 0, + "content": "Afra Amini, Tim Vieira, and Ryan Cotterell. 2024. Variational best-of-n alignment. *ArXiv*, abs/2407.06057." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.148, + 0.885, + 0.189 + ], + "angle": 0, + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 2024. Alphamath almost zero: process supervision without process. ArXiv, abs/2405.03553." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.2, + 0.885, + 0.268 + ], + "angle": 0, + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Joanna Matthiesen, Kevin Smith, and Joshua B Tenenbaum. 2024. Evaluating large vision-and-language models on children's mathematical olympiads. arXiv preprint arXiv:2406.15736." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.279, + 0.885, + 0.32 + ], + "angle": 0, + "content": "DeepSeek-AI. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint, arXiv:2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.331, + 0.885, + 0.386 + ], + "angle": 0, + "content": "Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Jiahui Wen. 2025. Virgo: A preliminary exploration on reproducing o1-like mllm." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.397, + 0.885, + 0.451 + ], + "angle": 0, + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. ArXiv, abs/2309.17179." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.462, + 0.885, + 0.529 + ], + "angle": 0, + "content": "Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, and Rongrong Ji. 2024. Cantor: Inspiring multimodal chain-of-thought of mllm. ArXiv, abs/2404.16033." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.54, + 0.885, + 0.594 + ], + "angle": 0, + "content": "Lin Gui, Cristina Garbacea, and Victor Veitch. 2024. Bonbon alignment for large language models and the sweetness of best-of-n sampling. ArXiv, abs/2406.00832." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.606, + 0.885, + 0.672 + ], + "angle": 0, + "content": "Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke S. Zettlemoyer, Noah A. Smith, and Ranjay Krishna. 2024. Visual sketchpad: Sketching as a visual chain of thought for multimodal language models. ArXiv, abs/2406.09403." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.684, + 0.885, + 0.712 + ], + "angle": 0, + "content": "Daniel Kahneman. 2011. Thinking, fast and slow. Farrar, Straus and Giroux." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.724, + 0.885, + 0.778 + ], + "angle": 0, + "content": "Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. 2023. Geomverse: A systematic evaluation of large models for geometric reasoning. Preprint, arXiv:2312.12241." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.789, + 0.885, + 0.856 + ], + "angle": 0, + "content": "Xuanyu Lei, Zonghan Yang, Xinrui Chen, Peng Li, and Yang Liu. 2024. Scaffolding coordinates to promote vision-language coordination in large multi-modal models. In International Conference on Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.868, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Chengzu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vuli'c, and Furu Wei. 2025. Imagine while reasoning in space: Multimodal visualization-of-thought." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.109, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.152 + ], + "angle": 0, + "content": "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. 2023. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.162, + 0.487, + 0.226 + ], + "angle": 0, + "content": "Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. 2021. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. Preprint, arXiv:2105.04165." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.237, + 0.487, + 0.303 + ], + "angle": 0, + "content": "Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. 2023. Compositional chain-of-thought prompting for large multimodal models. 
2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.311, + 0.487, + 0.377 + ], + "angle": 0, + "content": "Debjyoti Mondal, Suraj Modi, Subhadarshi Panda, Ritraj Singh, and Godawari Sudhakar Rao. 2024. Kamcot: Knowledge augmented multimodal chain-of-thoughts reasoning. In AAAI Conference on Artificial Intelligence." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.386, + 0.487, + 0.412 + ], + "angle": 0, + "content": "OpenAI. 2024a. Gpt-4o system card. Preprint, arXiv:2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.422, + 0.429, + 0.435 + ], + "angle": 0, + "content": "OpenAI. 2024b. Learning to reason with llms." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.444, + 0.487, + 0.523 + ], + "angle": 0, + "content": "Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, and 1 others. 2024. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.533, + 0.487, + 0.584 + ], + "angle": 0, + "content": "Santhosh Kumar Ramakrishnan, Erik Wijmans, Philipp Kraehenbuehl, and Vladlen Koltun. 2024. Does spatial cognition emerge in frontier models? Preprint, arXiv:2410.06468." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.594, + 0.487, + 0.647 + ], + "angle": 0, + "content": "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Preprint, arXiv:2303.11366." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.656, + 0.487, + 0.708 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. 
ArXiv, abs/2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.718, + 0.487, + 0.771 + ], + "angle": 0, + "content": "Hanshi Sun, Momin Haider, Ruiqi Zhang, Huitao Yang, Jiahao Qiu, Ming Yin, Mengdi Wang, Peter Bartlett, and Andrea Zanette. 2024. Fast best-of-n decoding via speculative rejection. ArXiv, abs/2410.20290." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.78, + 0.487, + 0.82 + ], + "angle": 0, + "content": "Gemini Team. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.829, + 0.487, + 0.921 + ], + "angle": 0, + "content": "Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, Hisham Cholakkal, Ivan Laptev, Mubarak Shah, Fahad Shahbaz Khan, and Salman H. Khan. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.881, + 0.139 + ], + "angle": 0, + "content": "Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. 2024. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.149, + 0.883, + 0.227 + ], + "angle": 0, + "content": "Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, Lanqing Hong, Hang Xu, and Xiaodan Liang. 2024. Atomthink: A slow thinking framework for multimodal mathematical reasoning. Preprint, arXiv:2411.11930." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.238, + 0.883, + 0.291 + ], + "angle": 0, + "content": "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, MingSung Kan, Junxian He, and Qizhe Xie. 2023. 
Self-evaluation guided beam search for reasoning. In Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.3, + 0.883, + 0.34 + ], + "angle": 0, + "content": "Guowei Xu, Peng Jin, Hao Li, Yibing Song, Lichao Sun, and Li Yuan. 2024. Llava-cot: Let vision language models reason step-by-step. ArXiv, abs/2411.10440." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.35, + 0.883, + 0.415 + ], + "angle": 0, + "content": "Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. ArXiv, abs/2303.11381." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.426, + 0.883, + 0.504 + ], + "angle": 0, + "content": "Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, and Dacheng Tao. 2024. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. ArXiv, abs/2412.18319." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.514, + 0.883, + 0.568 + ], + "angle": 0, + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. Preprint, arXiv:2210.03629." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.577, + 0.883, + 0.617 + ], + "angle": 0, + "content": "Fei Yu, Anningzhe Gao, and Benyou Wang. 2023. Ovm, outcome-supervised value models for planning in mathematical reasoning. In NAACL-HLT." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.627, + 0.883, + 0.704 + ], + "angle": 0, + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. Preprint, arXiv:2412.14135." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.716, + 0.883, + 0.768 + ], + "angle": 0, + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, Hai Zhao, George Karypis, and Alexander J. Smola. 2023. Multimodal chain-of-thought reasoning in language models. Trans. Mach. Learn. Res., 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.778, + 0.883, + 0.831 + ], + "angle": 0, + "content": "Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. 2023. Unleashing text-to-image diffusion models for visual perception. Preprint, arXiv:2303.02153." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.841, + 0.883, + 0.906 + ], + "angle": 0, + "content": "Qiji Zhou, Ruochen Zhou, Nike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. 2024. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. ArXiv, abs/2405.13872." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.086, + 0.883, + 0.906 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.084, + 0.454, + 0.119 + ], + "angle": 0, + "content": "A Performance Gain of VisuoThink Through Predictive Rollout Search" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.15, + 0.49, + 0.278 + ], + "angle": 0, + "content": "This appendix quantifies the performance improvements achieved by integrating predictive rollout search into the VisuoThink framework across geometry and spatial reasoning tasks. The performance gain through predictive rollout search is derived by subtracting the performance of VisuoThink (w/o rollout search) from those of the VisuoThink on models." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.302, + 0.49, + 0.464 + ], + "angle": 0, + "content": "As shown in Table 3, tasks with strong supervision (e.g., Visual Navigation and Visual Tiling) exhibit significantly higher gains compared to weak supervision tasks (e.g., Geometry \\(3K\\) and Geomverse-109). 
For instance, under strong supervision, Claude-3.5-Sonnet achieves a \\(+25.1\\%\\) improvement in Visual Navigation, while GPT-4o attains \\(+16.6\\%\\) in Visual Tiling. In contrast, weak supervision tasks like Geomverse-109 only show modest gains (e.g., \\(+5.4\\%\\) for GPT-4o)." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.512, + 0.453, + 0.53 + ], + "angle": 0, + "content": "B OKSpatial Reasoning Task Setting" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.561, + 0.49, + 0.691 + ], + "angle": 0, + "content": "Our formulation extends beyond VoT's basic requirements by mandating LVLMs to generate comprehensive operational specifications - for instance, requiring explicit output of both movement directions and precise step counts at each decision node. This advancement creates more realistic and functionally grounded spatial reasoning evaluations (e.g., robotic navigation emulation in real world)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.713, + 0.492, + 0.922 + ], + "angle": 0, + "content": "This appendix details the task formulation differences between VisuoThink and baseline methods (Table 4 and Table 5). For Visual Navigation, VisuoThink requires fine-grained, executable and explicit specification of both direction and step count in action sequences, whereas VoT focuses solely on direction navigation. This formulation mirrors real-world robotic navigation, where precise movement planning is critical. Similarly, in Visual Tiling, VisuoThink mandates detailed actions, including polyomino variant types, block positions, and action types (e.g., \"fit\" or \"remove\"), while VoT simplifies the task by omitting variant specifications." 
+ }, + { + "type": "title", + "bbox": [ + 0.51, + 0.084, + 0.883, + 0.116 + ], + "angle": 0, + "content": "C Task Formulation of Spatial Reasoning Tasks" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.126, + 0.884, + 0.158 + ], + "angle": 0, + "content": "Building upon VoT (Wu et al., 2024) framework, our challenging benchmarks comprise:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.166, + 0.885, + 0.376 + ], + "angle": 0, + "content": "- Visual Navigation evaluates LVLMs in a simulated 2D grid environment, where agents must navigate from initial position \\(\\mathbf{s}_0\\) to destination \\(\\mathbf{s}_k\\) through obstacle-laden paths. The formal problem is defined by grid map \\(\\mathbf{M}\\) containing \\(k\\) interconnected edges \\(\\mathbf{E} = \\{\\mathbf{e}(\\mathbf{s}_0,\\mathbf{s}_1),\\mathbf{e}(\\mathbf{s}_1,\\mathbf{s}_2),\\ldots ,\\mathbf{e}(\\mathbf{s}_{k - 1},\\mathbf{s}_k)\\}\\). The LVLM should generate a sequence of executable actions in json format \\(\\mathbf{A} = \\{(\\mathbf{d}_0,\\mathbf{l}_0),(\\mathbf{d}_1,\\mathbf{l}_1),\\ldots ,(\\mathbf{d}_{|\\mathbf{A}| - 1},\\mathbf{l}_{|\\mathbf{A}| - 1})\\}\\), where each tuple specifies movement direction \\(\\mathbf{d}_i\\) and exact step count \\(\\mathbf{l}_i\\), governed by the policy:" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.385, + 0.884, + 0.403 + ], + "angle": 0, + "content": "\\[\n\\mathbf {a} _ {\\mathbf {t}} \\sim \\mathcal {P} \\left(\\mathbf {d} _ {t}, \\mathbf {l} _ {t} \\mid \\mathbf {A} _ {t - 1}, \\mathbf {M}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.416, + 0.887, + 0.609 + ], + "angle": 0, + "content": "- Visual Tiling is a classic geometric reasoning challenge, this task assesses polyomino composition capabilities within confined rectangular regions \\(\\mathbf{R}\\) masked by \\(k\\) distinct polyominoes \\(\\mathbf{MP} = \\{\\mathbf{mp}_1,\\dots ,\\mathbf{mp}_k\\}\\). 
The LVLM must output action sequences \\(\\mathbf{a}_t = (\\mathbf{p}_t,\\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|B|}\\},\\mathbf{a}\\mathbf{t}_t)\\), where \\(\\mathbf{p}_t\\) and \\(\\mathbf{B} = \\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|\\mathbf{B}|}\\}\\) respectively indicate the selected polyomino type and the coordinates of the placement blocks. \\(\\mathbf{at}_t\\in \\{\\text{fit, remove}\\}\\) indicates the action type modifying rectangular state \\(\\mathbf{R}_t\\), thus formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.564, + 0.638, + 0.884, + 0.671 + ], + "angle": 0, + "content": "\\[\n\\mathbf {a} _ {t} \\sim \\mathcal {P} \\left(\\mathbf {p} _ {t}, \\mathbf {B}, \\mathbf {a t} _ {t} \\mid \\mathbf {R} _ {t - 1}, \\mathbf {M P}, \\mathbf {A} _ {t - 1} \\}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.544, + 0.676, + 0.884, + 0.757 + ], + "angle": 0, + "content": "Though the required actions are polyomino variant-aware as shown in table 5. As the polyomino variant type is implicitly expressed in the block positions, LVLM does not need to explicitly output it in actions anymore." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.197, + 0.082, + 0.803, + 0.182 + ], + "angle": 0, + "content": "
Supervision TypePerformance GainGPT-4oQwen2-VL-72BClaude-3.5-Sonnet
Strong SupervisionΔ Visual Navigation (%)+16.6+18.9+15.5
Δ Visual Tiling (%)+31.9+11.0+3.3
Δ Average (%)+24.3+15.0+9.4
Weak SupervisionΔ Geometry3K (%)+4.5+6.6+1.1
Δ Geomverse-109 (%)+6.2+4.2+6.3
Δ Average (%)+5.4+5.4+3.7
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.191, + 0.884, + 0.22 + ], + "angle": 0, + "content": "Table 3: Detailed performance gain of VisuoThink through predictive rollout search on benchmarks from Geometry and Spatial Reasoning over variable LVLM models." + }, + { + "type": "table", + "bbox": [ + 0.197, + 0.233, + 0.803, + 0.284 + ], + "angle": 0, + "content": "
MethodDirectionStepsTarget
Visual NavigationVoT×Navigate from the starting position
VisuoThinkto the destination.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.293, + 0.778, + 0.308 + ], + "angle": 0, + "content": "Table 4: Visual Navigation task setting differences between VoT and VisuoThink." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.321, + 0.879, + 0.4 + ], + "angle": 0, + "content": "
MethodActionTarget
Polyomino TypeVariant TypeBlock PositionsAction Type
Visual TilingVoTTo identify the correct variant for a polyomino in one action.
VisuoThinkTo fill the rectangle with feasible polyomino variants.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.236, + 0.409, + 0.759, + 0.424 + ], + "angle": 0, + "content": "Table 5: Visual Tiling task setting differences between VoT and VisuoThink." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.448, + 0.35, + 0.482 + ], + "angle": 0, + "content": "D Model and VisuoThink Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.492, + 0.49, + 0.522 + ], + "angle": 0, + "content": "We detail the model and VisuoThink Hyperparameters:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.534, + 0.49, + 0.646 + ], + "angle": 0, + "content": "Model Hyperparameters To ensure experimental fairness, we uniformly constrained the number of reasoning steps (i.e., \\(\\tau\\), the depth of the reasoning tree) to 10 across all experiments. During predictive rollout search, we set the number of sampled child nodes to 3, and we discuss its impact in section 6.2." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.657, + 0.49, + 0.834 + ], + "angle": 0, + "content": "VisuoThink Hyperparameters While VisuoThink employed a temperature of 0.8 when sampling child nodes, all other model invocations, including the baselines (e.g. CoT, VoT, VisualSketchpad, VisuoThink w/o rollout search), were conducted with temperature set to \\(O\\) for frontier performance. During the voting phase, we similarly maintained a temperature of \\(O\\) and implemented single-vote sampling, which not only reduced computational overhead in terms of model calls but also achieved comparable performance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.846, + 0.468, + 0.881 + ], + "angle": 0, + "content": "E Geomverse-109 Problem Generation Trajectory" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We establish a pipeline translating textual problems into problems with matplotlib-executable code. 
Be" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.449, + 0.885, + 0.644 + ], + "angle": 0, + "content": "yond the Geometry3K (Lu et al., 2021) dataset (48 problems) utilized in Sketchpad, we incorporate the D2 subset of Geomverse (Kazemi et al., 2023) to construct an slightly bigger dataset Geomverse-109 (90 problems). The original Geomverse dataset crucially includes annotated point coordinates essential for systematic problem synthesis. During the data synthesis phase, we first randomly choose 109 problems, then LVLMs generate corresponding high-quality Python code through LLM self-reflection (Shinn et al., 2023), then we filter out problems with poor diagram quality." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_origin.pdf b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f2e83da848e2fd8c1094f763edaa98e4162fe683 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/c8e9b00e-528a-4dec-8df2-9d7d8a1767a5_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06137ace5472142fcab0cc4c32cccdd0e662b366b382d85c011fda486cdccd57 +size 1700916 diff --git a/data/2025/2504_09xxx/2504.09130/full.md b/data/2025/2504_09xxx/2504.09130/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d599c4bb233d7fc0113113fba2c5d1d2ae34ff44 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/full.md @@ -0,0 +1,302 @@ +# VisuoThink: Empowering LVLM Reasoning with Multimodal Tree Search + +Yikun Wang $^{12}$ , Siyin Wang $^{12}$ , Qinyuan Cheng $^{1}$ , Zhaoye Fei $^{1}$ , Liang Ding $^{3}$ , Qipeng Guo $^{24}$ , Dacheng Tao $^{5}$ , Xipeng Qiu $^{\dagger 12}$ + +$^{1}$ Fudan University $^{2}$ Shanghai Innovation Institute + +3 The University of Sydney 4 Shanghai AI Laboratory 5 Nanyang Technological University yikunwang19@fudan.edu.cn + +# Abstract + +Recent 
advancements in Large Vision-Language Models have showcased remarkable capabilities. However, they often falter when confronted with complex reasoning tasks that humans typically address through visual aids and deliberate, step-by-step thinking. While existing methods have explored text-based slow thinking or rudimentary visual assistance, they fall short of capturing the intricate, interleaved nature of human visual-verbal reasoning processes. To overcome these limitations and inspired by the mechanisms of slow thinking in human cognition, we introduce VisuoThink, a novel framework that seamlessly integrates visuospatial and linguistic domains. VisuoThink facilitates multimodal slow thinking by enabling progressive visual-textual reasoning and incorporates test-time scaling through look-ahead tree search. Extensive experiments demonstrate that VisuoThink significantly enhances reasoning capabilities via inference-time scaling, even without fine-tuning, achieving state-of-the-art performance in tasks involving geometry and spatial reasoning. Our code has been open-sourced at https://github.com/ekonwang/VisuoThink. + +# 1 Introduction + +Recent advances in Large Vision-Language Models (LVLMs) (OpenAI, 2024a; Team, 2024) have shown remarkable progress across a variety of tasks. However, these models often struggle with complex reasoning challenges, such as geometric problem-solving (Qiao et al., 2024; Cherian et al., 2024) or spatial reasoning (Ramakrishnan et al., 2024; Wu et al., 2024), where human problem-solving approaches typically rely on visual aids. 
For example, when solving geometry problems, humans often iteratively sketch auxiliary lines or + +![](images/b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg) + +![](images/c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg) + +![](images/e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg) +Figure 1: Illustration of Input-Output Prompting, CoT, Vision-aided Thought and our VisuoThink. Vision-aided Thought often relies on reasoning with one-step or unreliable multi-step visual cues (generated by LVLMs). While VisuoThink addresses this gap through tool-augmented visual hints, coupled with a predictive-rollout search mechanism to systematically optimize reasoning capability. + +visualize intermediate steps, while exploring different reasoning paths - a form of "slow thinking" (Kahneman, 2011) that combines visual and verbal cognitive processes. + +With the success of o1 series models (OpenAI, 2024b), researchers have explored language as a medium for implementing slow thinking, coupled with test-time scaling techniques (Zeng et al., 2024). Given the inherently multimodal nature of reality, early efforts (Xu et al., 2024; Thawakar et al., 2025; Yao et al., 2024; Du et al., 2025) have attempted to extend such deliberative thinking to multimodal reasoning. However, even augmented with search strategy, these methods treat visual information merely as static input, relying solely on textual reasoning chains during the reasoning process - creating a "visual blind spot", where the + +potential for visual information throughout the reasoning process is largely ignored (Fig. 1a). On the other hand, while approaches like VisualSketchpad (Hu et al., 2024) and VoT (Wu et al., 2024) have recognized the importance of visual information by incorporating visual aids in reasoning (Fig. 1b), they mainly focus on single-step assistance or simplified visual hints (e.g., emojis). 
These methods lack the multi-step visual-textual interleaved reasoning process that characterizes human slow thinking, while failing to explore potential search strategies. + +To address these limitations, we propose VisuoThink, a multimodal tree search framework that systematically explores multiple reasoning paths with vision-text interleaved thinking at each step. Unlike previous approaches, Visuothink (Fig. 1c) enables multimodal slow thinking through two key innovations: (1) a step-by-step vision-text interleaved reasoning framework that dynamically utilizes multi-step visual aids from tool uses, and (2) a look-ahead tree search algorithm that explores multiple reasoning paths, enabling test-time scaling of the reasoning process. Specifically, our look-ahead tree search incorporates a predictive rollout mechanism that simulates the likely outcomes of different reasoning states. This allows the model to prioritize more promising paths and avoid less ones, guiding the reasoning process toward the optimal solution. Through this test-time scaling capability, the model can thoroughly explore and optimize reasoning paths dynamically during inference. + +Our empirical evaluation demonstrates that Visuothink significantly outperforms existing methods across various reasoning tasks, particularly in geometry and spatial reasoning domains. On Geomeverse, Our methods achieves an accuracy@1 as high as $48.5\%$ , with an improvement of as high as $21.8\%$ over the state-of-the-art baseline, which particularly shows strong performance of VisuoThink on problems requiring multi-step visual reasoning. Through extensive ablation studies, we show that each component of our framework contributes meaningfully to its overall performance. 
+ +In summary, our contributions include: + +- We propose a novel reasoning paradigm, multimodal tree search, for multimodal slow thinking that enables dynamic integration of visual and verbal reasoning paths throughout the problem-solving search process. +- We extend test-time scaling methods to the vi + +sual domain by proposing a predictive rollout mechanism that explores and optimizes visual reasoning paths by predicting future states. + +- We demonstrate substantial empirical improvements across multiple reasoning tasks, particularly in geometry and spatial reasoning, with detailed analyses revealing key insights about our approach. + +# 2 Related Work + +# 2.1 Text-centric Reasoning in LVLMs + +With the emergence of o1 models (OpenAI, 2024b), the importance of slow thinking has become increasingly evident (Zeng et al., 2024). Several works have attempted to extend this to LVLMs through methods like stage-wise reasoning (Xuet al., 2024), curriculum learning (Thawakar et al., 2025), tree search-based data generation (Yao et al., 2024), and LLM distillation (Du et al., 2025). However, these methods treat visual information as static input, relying only on textual data during reasoning, which limits their ability to fully leverage multimodal information for complex tasks. + +# 2.2 Vision-aided Reasoning + +Recent advancements in multimodal reasoning have demonstrated that incorporating visual information provides richer context and hints compared to text-only approaches. Early studies adopted a two-stage approach, where visual information is first transformed and grounded into text (Zhang et al., 2023), graph structures (e.g., scene graphs (Mitra et al., 2023) or knowledge graphs (Mondal et al., 2024)), or bounding boxes (Lei et al., 2024), followed by reasoning. 
Other works leverage existing vision models (e.g., segmentation, detection) to process input images into valuable cues for perception, enabling more precise image-understanding with fine-grained visual information (Yang et al., 2023; Zhou et al., 2024; Gao et al., 2024). + +Another sequence of research focuses on intermediate visual representations to enhance reasoning. For instance, Visual Sketchpad (Hu et al., 2024) employs Python-based drawing tools to generate sketches as intermediate visual aids for geometric problems, while VoT (Wu et al., 2024) formalizes visual thinking by generating emoji-like textual representations. MVOT (Li et al., 2025) fine-tunes multimodal models to generate images + +![](images/29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg) +Figure 2: The illustration of our VisuoThink framework with three stages: (1) vision-text interleaved expansion: generates candidate paths through vision-text interleaved thinking; (2) rollout simulation: sample candidate reasoning nodes and then perform look-ahead search to better evaluate the value of current states; (3) selection: selects the most promising path via self-voting with results or states from rollout. + +during reasoning, allowing the model to create visual aids dynamically. Despite these advancements, most existing methods rely on single-step or unreliable visual representations, lacking search mechanisms to test-time scaling through exploring multiple reasoning paths. In contrast, we develop a multimodal tree search framework that both leverages multi-step visual cues during reasoning and systematically explores reasoning paths through tree search. + +# 2.3 Test-time Scaling with Tree Search + +Scaling compute at test time has emerged as a powerful strategy to enhance LLMs' reasoning capabilities without increasing model parameters (Snell et al., 2024). 
Various approaches including BoN (Gui et al., 2024; Sun et al., 2024; Amini et al., 2024), guided beam search (Xie et al., 2023; Yu et al., 2023), and Monte Carlo Tree Search (MCTS) (Feng et al., 2023; Liu et al., 2023; Chen et al., 2024) have been explored for text models, demonstrating improved performance through different search strategies. However, the exploration of test-time scaling in LVLMs remains limited. Prior work like AtomThink (Xiang et al., 2024) has only investigated basic methods such as beam search, with text-only reasoning chains. In contrast, our method introduces vision-text interleaved thinking with look-ahead search, extending test-time scaling to multimodal reasoning. + +# 3 VisuoThink + +We propose VisuoThink, a novel framework for multimodal reasoning that dynamically integrates + +visual and textual information during the inference process. At its core, our framework implements multimodal slow thinking through a key mechanism: predictive rollout search that allows models to think ahead. + +# 3.1 Vision-Text Interleaved Thinking + +Our framework facilitates vision-text interleaved reasoning through an iterative cycle of Thought, Action, and Observation like existing work (Yao et al., 2023), which enables natural and dynamic interactions with external tools. (1) Thought phase: the model leverages visual information for textual reasoning (such as analyzing patterns based on previously added auxiliary lines) and determines the next step by planning what visual hints should be added to enhance understanding. (2) Action phase: the model executes the planned operations by calling external tools (like using Python code to draw auxiliary lines or highlight key features) to generate or modify visual information. (3) Observation phase: the model processes the visual feedback from the Action phase, incorporating these new visual hints into the next reasoning step. 
+ +The importance of visual information for LVLM reasoning is highlighted in VisuoThink, which utilize tool invocations to construct reliable visual hints step by step in a visual construction process. This tool-based design allows VisuoThink to flexibly adapt to various visual reasoning tasks. Moreover, unlike approaches (e.g. VisualSketchpad) that generate all visual aids at once, our step-by-step visual guidance naturally integrates with search techniques, enabling effective test-time scaling. + +# 3.2 Predictive Rollout Search + +Based on tree search methods and inspired by MCTS, we propose a predictive rollout search mechanism that interleaves visual-text thinking. By anticipating the outcomes of intermediate states, the model can make timely corrections, enabling more accurate and powerful reasoning. As shown in Figure 2, at each reasoning step, our framework first generates multiple candidate paths through vision-text interleaved thinking, then simulates these paths to predict their outcomes, and finally selects the most promising path through a self-voting mechanism. + +Vision-Text Interleaved Expansion In the whole reasoning chain $\mathbf{A} = \{\mathbf{a}_1, \mathbf{a}_2, \dots, \mathbf{a}_t\}$ , given the current node $\mathbf{a}_{t-1}$ , the model samples $k$ candidate nodes $\mathbf{S}_t = \{\mathbf{s}_t^1, \mathbf{s}_t^2, \dots, \mathbf{s}_t^k\}$ . Each candidate follows the vision-text interleaved thinking process described above, generating a sequence of Thought, Action, and Observation steps. This expansion creates a tree of possible reasoning paths, each representing a different problem-solving strategy. + +Rollout Simulation Visual reasoning often requires multiple steps to reach a conclusion, making it crucial to evaluate the full potential of each path. 
For each candidate node $\mathbf{s}_t^i$ , the model simulates the complete reasoning process to predict final outcomes $\mathbf{r}_t^i$ , rather than relying solely on immediate state evaluation. Different from expansion, the simulation extends each candidate node with a single path of vision-text interleaved thinking until reaching a final result. + +Selection The selection of the optimal path is performed through a self-voting mechanism. The model considers the task description, historical nodes, and the simulated path with predicted results for each candidate node. The selection process can be formalized as: + +$$ +\mathbf {S e l e c t} \left(\mathbf {S} _ {t}\right) = \underset {\mathbf {s} _ {t} ^ {i} \in \mathbf {S} _ {t}} {\arg \max } \mathbf {V o t e} \left(\mathbf {A} _ {t - 1}, \mathbf {s} _ {t} ^ {i}, \mathbf {r} _ {t} ^ {i}\right) \tag {1} +$$ + +where $\mathbf{A}_{t - 1}$ represents the historical context, $\mathbf{s}_t^i$ for the candidate node, and $\mathbf{r}_t^i$ is the predicted result or final state. The Select is a heuristic function served by the LVLM model to guide the process. This selection ensures the model pursues the most promising reasoning strategy. + +# 4 Solving Geometry with VisuoThink + +The core of our methodology is rooted in multi-step visual information processing and search-based rea + +soning, enabling LVLMs to address strongly constrained mathematical problems (e.g., geometry challenges) and open-domain scenarios (such as visual navigation and visual tiling in section 5). + +We formalize geometry problem-solving as a two-phase process integrating visual construction and algebraic computation. In Phase I, the model generates auxiliary lines defined by geometric constraints, such as connecting points $(x_{i},y_{i})$ and $(x_{j},y_{j})$ , construct a perpendicular or parallel line to form line segments $\mathbf{L} = \{l_i\}$ . 
This phase terminates with a AUX-END token, triggering Phase II, where geometric relationships are translated into solvable equations (e.g., $ax + b = 0$ ) through Python code execution. + +Task Formulation LVLM should produce the reasoning trajectory consisting of reasoning steps $\mathbf{A} = \{\mathbf{a}_t\}$ that leads to the final result $\mathbf{r}$ , given the original problem $\mathbf{Q}$ while taking into account the auxiliary lines $\mathbf{L}$ . The framework operates under a constraint $\sum_{t=1}^{|A|} \| \mathbf{a}_t \| \leq \tau$ , where $\mathbf{a}_t$ denotes visual-textual reasoning steps and $\tau$ is the maximum step limit: + +$$ +\mathbf {A} \sim \mathcal {P} \left(\left\{\mathbf {a} _ {1}, \dots , \mathbf {a} _ {| A |}, \mathbf {r} \right\} \mid \mathbf {Q}, \mathbf {L}\right) \text {s . t .} \sum_ {t = 1} ^ {| \mathbf {A} |} \| \mathbf {a} _ {i} \| \leq \tau \tag {2} +$$ + +This formulation mirrors human problem-solving by decomposing proofs into executable visual-textual steps, validated via coordinate-based tools like matplotlib and equation solver. + +Visual Construction We emphasize the criticality of incremental visual information for accurate solutions, where multi-step graphical representations originate from the progressive construction of auxiliary lines. This multi-stage approach facilitates search algorithm-enhanced refinement of auxiliary line generation, significantly improving LVLM capabilities in geometric reasoning. Consistent with Sketchpad methodology, we exclusively utilize common Python libraries (e.g., matplotlib) for diagram rendering. + +Algebraic Computation Unlike general tasks, solving geometry problems cannot rely solely on visual construction or the model's inherent capabilities; instead, it necessitates the use of computational tools to achieve precise and accurate results. This requirement stems from the need for exact numerical solutions and the mitigation of potential errors in geometric reasoning. 
Through systematic + +
ModelGPT-4oQwen2-VL-72B-InstructClaude-3.5-sonnet
Geomverse-109CoT11.15.614.4
VisualSketchpad8.96.716.7
VisualSketchpad + Equation Solver13.311.117.8
VisuoThink w/o rollout search (ours)24.419.026.7
VisuoThink (ours)28.925.627.8
Geometry3K(Lu et al., 2021)CoT20.818.837.5
VisualSketchPad22.917.039.6
VisualSketchpad + Equation Solver25.014.941.7
VisuoThink w/o rollout search (ours)27.120.837.5
VisuoThink (ours)33.325.043.8
+ +Table 1: The 1-shot benchmark results (Accuracy@1) on Geometry including Geomverse-109 and Geometry3k of SOTA large visual language models. For GPT-4o and Claude-3.5-sonnet, we employ newest cutoffs (gpt-4o-2024-11-20 and claude-3-5-sonnet-20241022) separately. The gray part indicates results from VisuoThink and bold results represent the best performance. + +
ModelDatasetVisual NavigationVisual Tiling
Subset (Num. Samples)level-3 (16)level-4 (31)level-5 (62)level-2 (119)
GPT-4oCoT18.83.20.00.8
VoT25.00.00.01.7
VoT + Executor62.59.74.812.6
VisuoThink w/o rollout search (ours)81.232.311.319.3
VisuoThink (ours)93.861.319.451.2
Qwen2-VL-72B-InstructCoT6.73.2-0.0
VoT0.00.0-0.8
VoT + Executor25.03.2-6.7
VisuoThink w/o rollout search (ours)50.06.5-9.2
VisuoThink (ours)81.312.9-20.2
Claude-3.5-sonnetCoT37.53.20.00.8
VoT56.30.00.02.5
VoT + Executor68.822.616.110.1
VisuoThink w/o rollout search (ours)81.238.741.980.7
VisuoThink (ours)93.861.353.284.0
+ +Table 2: The Pass@1 performance comparison on spatial reasoning benchmarks including Visual Navigation and Visual Tiling across SOTA LVLMs. The gray part indicates results from VisuoThink and bold results represent the best performance. The results of Qwen2-VL-72B-Instruct on Visual Navigation ( $k = 5$ ) are masked out due to its restrained performance on the subset. The results from VoT with Executor are also reported, where the models utilize the unreliable visual hints generated by themselves rather than executor, consistent with the VoT framework. + +integration, like VPD (Zhao et al., 2023), and VisualStechpad (Hu et al., 2024), phase II employs Python code execution for precise computation to mitigate LVLM hallucination risks. Furthermore, the model constructs single-variable algebraic equations based on identified geometric relationships, subsequently invoking equation solvers for numerical resolution. + +# 4.1 Empirical Results + +Setup We conduct comprehensive evaluations on the challenging Geometry3K and Geomverse-109 datasets to demonstrate the methodological superiority. Especially we detail the trajectory of Geomverse-109 dataset synthesis in appendix E. SOTA closed-source models including gpt-4o2024-11-20 and claude-3-5-sonnet-20241022 are + +leveraged for inference. To ensure architectural diversity, open-source model (e.g., Qwen2-VL-72B) were incorporated; however, smaller-parameter open-source variants were excluded due to their capability constraints. And we detail the model and algorithm hyperparameters in appendix D. + +Analysis Our empirical results reveal that, even without rollout search augmentation, our strategy substantially enhances LVLM reasoning capabilities compared to Chain-of-Thought (CoT) (MiTRA et al., 2023) and Visual Sketchpad (Hu et al., 2024) baselines. 
Notably, on the Geomverse-109 (Kazemi et al., 2023) benchmark, VisuoThink outperforms CoT and Visual Sketchpad by an average of $17.1\%$ and $16.7\%$ across all evaluated models, and predictive rollout search further + +![](images/20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg) +Figure 3: The illustration of spatial reasoning tasks derived from VoT (Wu et al., 2024), including Visual Navigation and Visual Tiling. LVLM is required to execute a sequence of actions to complete certain goals. Our experimental setting makes them much more challenging and closer to real-environment deployment. + +enhances models' performance by an average of $4.1\%$ . Also, the employment of equation solver on Visual Sketchpad also increases an average performance of $3.3\%$ . This performance gap likely stems from Geomverse's emphasis on geometric relationship construction, where our equation-solving framework helps to accurately get intermediate answers and enables efficient resolution of structurally complex problems. The systematic integration of geometric analysis tools further mitigates error propagation inherent in conventional LVLM reasoning baselines. + +# 5 Spatial Reasoning with VisuoThink + +Spatial reasoning, defined as the cognitive capability to interpret spatial object relationships, motion dynamics, and environmental interactions, constitutes a foundational requirement for mission-critical applications such as robotic systems, autonomous navigation, and augmented reality. These domains demand robust integration of visual perception and precise manipulation of spatial-temporal constraints for optimal action planning. + +Task Formulation Building upon the Visualization of Thought (VoT) (Wu et al., 2024) benchmarks, we design two challenging spatial reasoning benchmarks with enhanced complexity as shown in figure 3: Visual Navigation and Visual Tiling. 
We provide detailed materials of the differences between the original VoT benchmark setup and our experimental configuration in Appendix B and additionally provide the mathematical task formu + +lation in appendix C. + +Visual Construction via Executor During task execution, robots deployed in true environments typically receive environmental feedback following each action, which facilitates perception and subsequent decision-making processes. In our methodology, we leverage environmental interaction tools to enhance the model's spatial reasoning capabilities. In each action, we employ an executor to implement the corresponding action, and return textual execution feedback and visuospatial hint (optional) representing the map state. In the context of (1) Visual Navigation, the visual feedback corresponds to the map including agent's current position; while in (2) Visual Tiling scenarios, it represents the current state of rectangle occupation patterns. + +# 5.1 Empirical Results + +Setup We evaluate our framework on two spatial reasoning benchmarks: Visual Navigation and Visual Tiling. For Visual Navigation, we create three difficulty levels with increasing map complexity, where the level indicates the $k$ for Visual Navigation as shown in table 2. For Visual Tiling, we focus on level-2 (i.e. $k = 2$ ) problems with 119 samples. We compare our method against Chain-of-Thought (CoT), Visualization of Thought (VoT) (Wu et al., 2024). As table 2 indicates, the results from VoT with tool interactions (i.e. Executor) are also reported, where textual feedbacks are employed but the visual hints are still generated by the model rather from executor, consistent with the VoT framework. The source of visual hints distinguishes it from our method. We employ the same + +![](images/6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg) +Figure 4: (LEFT) The trend of Pass@1 rate on Visual Navigation as the number of reasoning steps increases. 
(RIGHT) The relationship between the Accuracy@1 on geometry problems (Geomverse) and tree width for rollout search. We observe that LVLMs significantly benefit from longer reasoning chains, although the effect plateaus rapidly beyond a certain threshold of reasoning steps. The relationship between performance and tree width exhibits a more complex pattern, demonstrating an inverted U-shaped trend with both GPT-4o and Claude-3.5-Sonnet. + +![](images/9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg) + +temperature and VisuoThink hyperparameters as section 4.1. + +Analysis In spatial reasoning experiments, VisuoThink demonstrates significant performance improvements over baseline methods, particularly when augmented with predictive rollout search. As shown in Table 2, VisuoThink achieves the highest accuracy across all tasks, outperforming both CoT and VoT baselines. For instance, on the Visual Navigation task, VisuoThink on GPT-4o achieves a $93.8\%$ accuracy at level-3, compared to $62.5\%$ for VoT with an executor and $18.8\%$ for CoT. This trend is consistent across different model architectures, including GPT-4o, Qwen2-VL-72B-Instruct, and Claude-3.5-sonnet, highlighting the robustness of our approach. + +Similar to the geometry experiments in Section 4, the integration of tool interactions and multi-step visual reasoning plays a critical role in enhancing performance. The executor's feedback mechanism, which provides visual updates after each action, mirrors the incremental visual refinement seen in geometry tasks, where auxiliary lines are progressively constructed. For instance, VisuoThink without rollout search demonstrates an average improvement of $34.7\%$ on Visual Tiling across diverse models. We observe that while VoT augmented with textual feedback achieves an average increase of $8.1\%$ , its performance gain is notably less pronounced compared to VisuoThink without rollout search. 
This underscores the critical role of reliable visual cues in enhancing reasoning capa + +bilities. The dynamic interaction allows the model to iteratively refine its reasoning path, leading to more accurate solutions. + +# 6 Discussion + +In this section, we analyze key aspects of VisuoThink's performance. We examine how the length of reasoning chain affects spatial reasoning, the impact of child node expansion in rollout search, and the influence of supervision levels in predictive rollouts across tasks. These insights highlight VisuoThink's effectiveness and suggest future directions for multimodal reasoning frameworks. + +# 6.1 Could Longer Reasoning Chains Assist LVLMs in Reasoning? + +In practical applications of $LVLMs$ for spatial reasoning tasks, each tool invocation can be seen as an agent attempting an action in the environment and receiving feedback. Although many attempts may be inaccurate, allowing the model more trial-and-error opportunities before achieving the final goal could potentially enhance its reasoning capabilities. By setting different upper limits on the number of reasoning steps in visual navigation tasks, we observe a positive correlation between the number of reasoning steps and the model's task completion rate. This suggests that the model indeed benefits from more tool invocations and longer reasoning. + +However, as the number of reasoning steps increases, the completion rate gradually converges, + +making further significant improvements challenging. As shown in figure 4 (left), for instance, increasing reasoning steps from 10 to 20 resulted in substantial performance gains $(+54.1\%$ and $+48.4\%)$ across different LVLM architectures (GPT-4o and Claude-3.5-sonnet). However, when reasoning steps were increased from 20 to 40, the performance growth slowed dramatically, dropping to $+6.5\%$ and $+2.1\%$ , respectively. 
This phenomenon aligns with expectations, as merely increasing the number of tool invocations does not enable the model to better solve the most challenging samples. This underscores the necessity of techniques like rollout search within the broader context of test-time scaling. + +# 6.2 Could a Larger Tree Span Enhance VisuoThink's Performance? + +Predictive rollouts enhance the model's reasoning capabilities, which can be viewed as a tangible outcome of successfully expanding the model's reasoning search space. A natural question arises: Can we further improve the model's reasoning performance on benchmarks simply by increasing the number of candidate child nodes at each selection step, i.e., expanding the tree width, thereby enhancing the model's reasoning capability? To investigate this, we conducted comparative experiments on geometry tasks using GPT-4o and Claude-3.5-sonnet, keeping the depth of the reasoning tree constant while varying the number of candidate child nodes. + +As presented in figure 4 (right), we observed an inverted U-shaped trend in overall performance as the number of candidate tree nodes increased across different model architectures. Notably, when the number of candidate child nodes equals 1, the model follows a single reasoning path, effectively bypassing predictive rollout search. Contrary to expectations, the performance trend initially rises and then declines. This counterintuitive result can be attributed to the inherent errors in the model's evaluation of child nodes. Simply and aggressively increasing the tree width leads to confusion in selecting child nodes, which in turn reduces overall reasoning efficiency. Thus, an interesting conclusion emerges: we cannot expect to continuously improve model performance by merely increasing the number of child nodes in rollout search. + +![](images/cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg) +Figure 5: The performance gain $(+ \%)$ on tasks through predictive rollout search. 
The performance gain is calculated via the performance gap between VisuoThink (w/o rollout search) and VisuoThink. + +# 6.3 Strong v.s. Weak Supervision in Predictive Rollout Search + +An intriguing observation is that the strength of guidance provided by predictive rollout results varies between geometry and spatial reasoning tasks. In geometry tasks, the model only receives the final numerical results of the problem, whereas in spatial reasoning tasks, the model has access to visual states of stronger supervision (e.g., the agent's final position, the position of the destination, etc.). In other words, predictive rollouts in geometry tasks offer weaker supervision, while those in spatial reasoning tasks provide stronger supervision. + +This observation aligns with the findings of the Deepseek R1 report, which highlights that outcome-based supervision in RL can significantly enhance Deepseek-R1-Zero's reasoning capabilities (DeepSeek-AI, 2025). The effectiveness of such supervision stems from its strong supervisory signal, and predictive rollouts with strong supervision are more effective in improving model reasoning performance. This is further supported by our experimental results, as illustrated in figure 5, where predictive rollouts demonstrated more substantial performance gains in spatial reasoning tasks compared to geometry tasks, across both open-source and closed-source models. The detailed performance gain results are presented in appendix A. + +# 7 Conclusion + +We present VisuoThink, a multimodal tree search framework enhancing LVLM reasoning through + +dynamic visual-textual interleaving and predictive rollout search. Our approach demonstrates significant improvements across geometry and spatial reasoning tasks without requiring model fine-tuning. Empirical results show substantial performance gains on geometry and spatial reasoning benchmarks. 
Our analysis reveals key insights about tool interaction benefits, search space optimization, and supervision strength in multimodal reasoning. These findings open new possibilities for advancing LVLM capabilities in complex reasoning tasks. + +# Limitations + +Despite its strong performance, VisuoThink has several limitations. First, the predictive rollout search process introduces significant computational overhead, making it potentially impractical for real-time applications. Second, our approach particularly relies on tool interactions for stronger capability, which may require more effort in some specific deployment environments. Third, the framework's effectiveness is constrained by the quality of the base VLM's reasoning capabilities - while it enhances performance, it cannot overcome fundamental model limitations. Finally, our evaluation focuses primarily on geometric and spatial reasoning tasks. + +# Ethics and Reproducibility Statements + +Ethics We take ethical considerations very seriously and strictly adhere to the ACL Ethics Policy. This paper proposes a test-time slow-thinking framework to improve the multimodal reasoning ability of current LVLMs. All evaluation datasets used in this paper will be publicly available or have been widely adopted by researchers. Thus, we believe that this research will not pose ethical issues. + +Reproducibility In this paper, we discuss the detailed experimental setup, such as hyperparameters, implementation of algorithm, and statistic descriptions. More importantly, we will open source our code and data in the future to help reproduce the experimental results of this paper. + +# References + +Afra Amini, Tim Vieira, and Ryan Cotterell. 2024. Variational best-of-n alignment. *ArXiv*, abs/2407.06057. +Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 2024. Alphamath almost zero: process supervision without process. ArXiv, abs/2405.03553. 
+Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Joanna Matthiesen, Kevin Smith, and Joshua B Tenenbaum. 2024. Evaluating large vision-and-language models on children's mathematical olympiads. arXiv preprint arXiv:2406.15736. +DeepSeek-AI. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint, arXiv:2501.12948. +Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Jiahui Wen. 2025. Virgo: A preliminary exploration on reproducing o1-like mllm. +Xidong Feng, Ziyu Wan, Muning Wen, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. ArXiv, abs/2309.17179. +Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, and Rongrong Ji. 2024. Cantor: Inspiring multimodal chain-of-thought of mllm. ArXiv, abs/2404.16033. +Lin Gui, Cristina Garbacea, and Victor Veitch. 2024. Bonbon alignment for large language models and the sweetness of best-of-n sampling. ArXiv, abs/2406.00832. +Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke S. Zettlemoyer, Noah A. Smith, and Ranjay Krishna. 2024. Visual sketchpad: Sketching as a visual chain of thought for multimodal language models. ArXiv, abs/2406.09403. +Daniel Kahneman. 2011. Thinking, fast and slow. Farrar, Straus and Giroux. +Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. 2023. Geomverse: A systematic evaluation of large models for geometric reasoning. Preprint, arXiv:2312.12241. +Xuanyu Lei, Zonghan Yang, Xinrui Chen, Peng Li, and Yang Liu. 2024. Scaffolding coordinates to promote vision-language coordination in large multi-modal models. In International Conference on Computational Linguistics. +Chengzu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vuli'c, and Furu Wei. 2025. 
Imagine while reasoning in space: Multimodal visualization-of-thought. + +Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. 2023. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. +Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. 2021. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. Preprint, arXiv:2105.04165. +Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. 2023. Compositional chain-of-thought prompting for large multimodal models. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431. +Debjyoti Mondal, Suraj Modi, Subhadarshi Panda, Ritraj Singh, and Godawari Sudhakar Rao. 2024. Kamcot: Knowledge augmented multimodal chain-of-thoughts reasoning. In AAAI Conference on Artificial Intelligence. +OpenAI. 2024a. Gpt-4o system card. Preprint, arXiv:2410.21276. +OpenAI. 2024b. Learning to reason with llms. +Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, and 1 others. 2024. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284. +Santhosh Kumar Ramakrishnan, Erik Wijmans, Philipp Kraehenbuehl, and Vladlen Koltun. 2024. Does spatial cognition emerge in frontier models? Preprint, arXiv:2410.06468. +Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Preprint, arXiv:2303.11366. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. ArXiv, abs/2408.03314. 
+Hanshi Sun, Momin Haider, Ruiqi Zhang, Huitao Yang, Jiahao Qiu, Ming Yin, Mengdi Wang, Peter Bartlett, and Andrea Zanette. 2024. Fast best-of-n decoding via speculative rejection. ArXiv, abs/2410.20290. +Gemini Team. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530. +Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, Hisham Cholakkal, Ivan Laptev, Mubarak Shah, Fahad Shahbaz Khan, and Salman H. Khan. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms. + +Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. 2024. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models. +Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, Lanqing Hong, Hang Xu, and Xiaodan Liang. 2024. Atomthink: A slow thinking framework for multimodal mathematical reasoning. Preprint, arXiv:2411.11930. +Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, MingSung Kan, Junxian He, and Qizhe Xie. 2023. Self-evaluation guided beam search for reasoning. In Neural Information Processing Systems. +Guowei Xu, Peng Jin, Hao Li, Yibing Song, Lichao Sun, and Li Yuan. 2024. Llava-cot: Let vision language models reason step-by-step. ArXiv, abs/2411.10440. +Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. ArXiv, abs/2303.11381. +Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, and Dacheng Tao. 2024. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. ArXiv, abs/2412.18319. 
+Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. Preprint, arXiv:2210.03629. +Fei Yu, Anningzhe Gao, and Benyou Wang. 2023. Ovm, outcome-supervised value models for planning in mathematical reasoning. In NAACL-HLT. +Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. Preprint, arXiv:2412.14135. +Zhuosheng Zhang, Aston Zhang, Mu Li, Hai Zhao, George Karypis, and Alexander J. Smola. 2023. Multimodal chain-of-thought reasoning in language models. Trans. Mach. Learn. Res., 2024. +Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. 2023. Unleashing text-to-image diffusion models for visual perception. Preprint, arXiv:2303.02153. +Qiji Zhou, Ruochen Zhou, Nike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. 2024. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. ArXiv, abs/2405.13872. + +# A Performance Gain of VisuoThink Through Predictive Rollout Search + +This appendix quantifies the performance improvements achieved by integrating predictive rollout search into the VisuoThink framework across geometry and spatial reasoning tasks. The performance gain through predictive rollout search is derived by subtracting the performance of VisuoThink (w/o rollout search) from those of the VisuoThink on models. + +As shown in Table 3, tasks with strong supervision (e.g., Visual Navigation and Visual Tiling) exhibit significantly higher gains compared to weak supervision tasks (e.g., Geometry $3K$ and Geomverse-109). For instance, under strong supervision, Claude-3.5-Sonnet achieves a $+25.1\%$ improvement in Visual Navigation, while GPT-4o attains $+16.6\%$ in Visual Tiling. 
In contrast, weak supervision tasks like Geomverse-109 only show modest gains (e.g., $+5.4\%$ for GPT-4o). + +# B Spatial Reasoning Task Setting + +Our formulation extends beyond VoT's basic requirements by requiring LVLMs to generate comprehensive operational specifications - for instance, requiring explicit output of both movement directions and precise step counts at each decision node. This advancement creates more realistic and functionally grounded spatial reasoning evaluations (e.g., robotic navigation emulation in the real world). + +This appendix details the task formulation differences between VisuoThink and baseline methods (Table 4 and Table 5). For Visual Navigation, VisuoThink requires fine-grained, executable and explicit specification of both direction and step count in action sequences, whereas VoT focuses solely on direction navigation. This formulation mirrors real-world robotic navigation, where precise movement planning is critical. Similarly, in Visual Tiling, VisuoThink mandates detailed actions, including polyomino variant types, block positions, and action types (e.g., "fit" or "remove"), while VoT simplifies the task by omitting variant specifications. + +# C Task Formulation of Spatial Reasoning Tasks + +Building upon the VoT (Wu et al., 2024) framework, our challenging benchmarks comprise: + +- Visual Navigation evaluates LVLMs in a simulated 2D grid environment, where agents must navigate from initial position $\mathbf{s}_0$ to destination $\mathbf{s}_k$ through obstacle-laden paths. The formal problem is defined by grid map $\mathbf{M}$ containing $k$ interconnected edges $\mathbf{E} = \{\mathbf{e}(\mathbf{s}_0,\mathbf{s}_1),\mathbf{e}(\mathbf{s}_1,\mathbf{s}_2),\ldots ,\mathbf{e}(\mathbf{s}_{k - 1},\mathbf{s}_k)\}$ . 
The LVLM should generate a sequence of executable actions in JSON format $\mathbf{A} = \{(\mathbf{d}_0,\mathbf{l}_0),(\mathbf{d}_1,\mathbf{l}_1),\ldots ,(\mathbf{d}_{|\mathbf{A}| - 1},\mathbf{l}_{|\mathbf{A}| - 1})\}$ , where each tuple specifies movement direction $\mathbf{d}_i$ and exact step count $\mathbf{l}_i$ , governed by the policy: + +$$
\mathbf{a}_t \sim \mathcal{P}\left(\mathbf{d}_t, \mathbf{l}_t \mid \mathbf{A}_{t-1}, \mathbf{M}\right) \tag{3}
$$ + +- Visual Tiling is a classic geometric reasoning challenge; this task assesses polyomino composition capabilities within confined rectangular regions $\mathbf{R}$ masked by $k$ distinct polyominoes $\mathbf{MP} = \{\mathbf{mp}_1,\dots ,\mathbf{mp}_k\}$ . The LVLM must output action sequences $\mathbf{a}_t = (\mathbf{p}_t,\{\mathbf{b}_1,\dots ,\mathbf{b}_{|\mathbf{B}|}\},\mathbf{a}\mathbf{t}_t)$ , where $\mathbf{p}_t$ and $\mathbf{B} = \{\mathbf{b}_1,\dots ,\mathbf{b}_{|\mathbf{B}|}\}$ respectively indicate the selected polyomino type and the coordinates of the placement blocks. $\mathbf{at}_t\in \{\text{fit, remove}\}$ indicates the action type modifying rectangular state $\mathbf{R}_t$ , thus formalized as: + +$$
\mathbf{a}_t \sim \mathcal{P}\left(\mathbf{p}_t, \mathbf{B}, \mathbf{at}_t \mid \mathbf{R}_{t-1}, \mathbf{MP}, \mathbf{A}_{t-1}\right) \tag{4}
$$ + +The required actions are polyomino variant-aware, as shown in table 5. However, as the polyomino variant type is implicitly expressed in the block positions, the LVLM does not need to explicitly output it in actions anymore. + +
Supervision TypePerformance GainGPT-4oQwen2-VL-72BClaude-3.5-Sonnet
Strong SupervisionΔ Visual Navigation (%)+16.6+18.9+15.5
Δ Visual Tiling (%)+31.9+11.0+3.3
Δ Average (%)+24.3+15.0+9.4
Weak SupervisionΔ Geometry3K (%)+4.5+6.6+1.1
Δ Geomverse-109 (%)+6.2+4.2+6.3
Δ Average (%)+5.4+5.4+3.7
+ +Table 3: Detailed performance gain of VisuoThink through predictive rollout search on benchmarks from Geometry and Spatial Reasoning over variable LVLM models. + +
MethodDirectionStepsTarget
Visual NavigationVoT×Navigate from the starting position
VisuoThinkto the destination.
+ +Table 4: Visual Navigation task setting differences between VoT and VisuoThink. + +
MethodActionTarget
Polyomino TypeVariant TypeBlock PositionsAction Type
Visual TilingVoTTo identify the correct variant for a polyomino in one action.
VisuoThinkTo fill the rectangle with feasible polyomino variants.
+ +Table 5: Visual Tiling task setting differences between VoT and VisuoThink. + +# D Model and VisuoThink Hyperparameters + +We detail the model and VisuoThink Hyperparameters: + +Model Hyperparameters To ensure experimental fairness, we uniformly constrained the number of reasoning steps (i.e., $\tau$ , the depth of the reasoning tree) to 10 across all experiments. During predictive rollout search, we set the number of sampled child nodes to 3, and we discuss its impact in section 6.2. + +VisuoThink Hyperparameters While VisuoThink employed a temperature of 0.8 when sampling child nodes, all other model invocations, including the baselines (e.g. CoT, VoT, VisualSketchpad, VisuoThink w/o rollout search), were conducted with temperature set to $O$ for frontier performance. During the voting phase, we similarly maintained a temperature of $O$ and implemented single-vote sampling, which not only reduced computational overhead in terms of model calls but also achieved comparable performance. + +# E Geomverse-109 Problem Generation Trajectory + +We establish a pipeline translating textual problems into problems with matplotlib-executable code. Be + +yond the Geometry3K (Lu et al., 2021) dataset (48 problems) utilized in Sketchpad, we incorporate the D2 subset of Geomverse (Kazemi et al., 2023) to construct an slightly bigger dataset Geomverse-109 (90 problems). The original Geomverse dataset crucially includes annotated point coordinates essential for systematic problem synthesis. During the data synthesis phase, we first randomly choose 109 problems, then LVLMs generate corresponding high-quality Python code through LLM self-reflection (Shinn et al., 2023), then we filter out problems with poor diagram quality. 
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09130/images/19e64f65ee71978cf7c2743f225b1ee49725bfe66878d73eeb991ee47bf4ad4f.jpg b/data/2025/2504_09xxx/2504.09130/images/19e64f65ee71978cf7c2743f225b1ee49725bfe66878d73eeb991ee47bf4ad4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6ffb85d88d760451a15b559be9e94108fa03b00 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/19e64f65ee71978cf7c2743f225b1ee49725bfe66878d73eeb991ee47bf4ad4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2893c00ae61a2ce06cc959b226c47479a42ceada98a03d1280f43b83894b0625 +size 6031 diff --git a/data/2025/2504_09xxx/2504.09130/images/20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg b/data/2025/2504_09xxx/2504.09130/images/20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c9563c6d93aabdf401a9fc8eff212c1c103bb07 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c66c4a06720c42a0879deb40713e0b6f304e762576d218eae92fdefd9dd4cfa9 +size 93204 diff --git a/data/2025/2504_09xxx/2504.09130/images/29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg b/data/2025/2504_09xxx/2504.09130/images/29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91d65db01e0faf666d35fee9df8927fc340827c0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aa9e18d6665121330baa385b41539c0b88622e3e3c2348c7e52cf035c34384e +size 74711 diff --git 
a/data/2025/2504_09xxx/2504.09130/images/4c3f7c05a73cf0d6a4150e4bdd1954214451acc2c175ad7f630e3d5fdb677bbd.jpg b/data/2025/2504_09xxx/2504.09130/images/4c3f7c05a73cf0d6a4150e4bdd1954214451acc2c175ad7f630e3d5fdb677bbd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b80b15a36cb4ca453ef954d0f082143b2ff292b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/4c3f7c05a73cf0d6a4150e4bdd1954214451acc2c175ad7f630e3d5fdb677bbd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae242fb227f545cfaeb7ebd26ae3ffc6db4c7585d47c3da2bc36ff7f1260febf +size 32377 diff --git a/data/2025/2504_09xxx/2504.09130/images/52f987e1a32a2d4014ff1a9c4711e76163fd34e624888813f4d62b0f8e10f06d.jpg b/data/2025/2504_09xxx/2504.09130/images/52f987e1a32a2d4014ff1a9c4711e76163fd34e624888813f4d62b0f8e10f06d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ee0ca5e1a1c22bc43a70ff3a1704201d24f284a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/52f987e1a32a2d4014ff1a9c4711e76163fd34e624888813f4d62b0f8e10f06d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:569fa4b1ae915228bd6ca2598a720003041889e0bc82e318b8ffe3ad6509167d +size 5947 diff --git a/data/2025/2504_09xxx/2504.09130/images/6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg b/data/2025/2504_09xxx/2504.09130/images/6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7e1b7559f97a9447450a89ffc1de56d252cdcc3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:123d0a4a748d6737c3c5325d138a153b7e6c8f34b990c7e2803fb9d8c4813411 +size 23531 diff --git a/data/2025/2504_09xxx/2504.09130/images/7ec840b1973798c457f4be3a21bf34893e11feae173b9b3dfa750a44bdc86c48.jpg 
b/data/2025/2504_09xxx/2504.09130/images/7ec840b1973798c457f4be3a21bf34893e11feae173b9b3dfa750a44bdc86c48.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18e541144821f1918f8a3d647b5c2b97fc37ebf4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/7ec840b1973798c457f4be3a21bf34893e11feae173b9b3dfa750a44bdc86c48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eed4f0d41eff220949fffa789147f58db8d08503697d93155352a22c2e285df4 +size 79060 diff --git a/data/2025/2504_09xxx/2504.09130/images/8beb56cc31f29e1a3c4943714634b9fb69453ae0a5a20d7250521aaf41b59cc2.jpg b/data/2025/2504_09xxx/2504.09130/images/8beb56cc31f29e1a3c4943714634b9fb69453ae0a5a20d7250521aaf41b59cc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b324c0c4f93af34db7b7f390169ecad34851c3cb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/8beb56cc31f29e1a3c4943714634b9fb69453ae0a5a20d7250521aaf41b59cc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84415c3c61bef578b8214113855a10249d78ec85ff29c211e26ce03215b5ad38 +size 54984 diff --git a/data/2025/2504_09xxx/2504.09130/images/91b03a9f72e061c8c659c7721beb2d061621a2d5b30b55e226084a1e7228568a.jpg b/data/2025/2504_09xxx/2504.09130/images/91b03a9f72e061c8c659c7721beb2d061621a2d5b30b55e226084a1e7228568a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..381a1fc5909e85ff98c46b44d2093d46d3b5e979 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/91b03a9f72e061c8c659c7721beb2d061621a2d5b30b55e226084a1e7228568a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adc04356a8194457258a3fcfb1a05ad4dc2da150cddf15b8843058e2cec6322d +size 44255 diff --git a/data/2025/2504_09xxx/2504.09130/images/9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg b/data/2025/2504_09xxx/2504.09130/images/9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..65e5e21b65fddcf0f670c7fd8e8aedda2b5faedd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9033199962bc113e5f7d5f988989db03dc0a9270669edbd9b5d8cb0bc631081d +size 23020 diff --git a/data/2025/2504_09xxx/2504.09130/images/99750640e65a54fd353d13a477f47d9eb452145299e0e8cc39dc95e23dfbf8b8.jpg b/data/2025/2504_09xxx/2504.09130/images/99750640e65a54fd353d13a477f47d9eb452145299e0e8cc39dc95e23dfbf8b8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68b4978171d588ee04b6235e7e3d88857c7c6058 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/99750640e65a54fd353d13a477f47d9eb452145299e0e8cc39dc95e23dfbf8b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:985f2de0bc2ba262ea9ff1112211af5bbd765224c19075d391e6ac9e6fa2f128 +size 6351 diff --git a/data/2025/2504_09xxx/2504.09130/images/ac64c958c970ea6e08d36c9e2fd472841989265e83951721a027f0c92d45d633.jpg b/data/2025/2504_09xxx/2504.09130/images/ac64c958c970ea6e08d36c9e2fd472841989265e83951721a027f0c92d45d633.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d5b52949166d331c25ec642934418e8394da3c0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/ac64c958c970ea6e08d36c9e2fd472841989265e83951721a027f0c92d45d633.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e509aaa61ee4db5c7381a4f4ec2333a56b2a71a349792d032f41080debd7b2d +size 4292 diff --git a/data/2025/2504_09xxx/2504.09130/images/b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg b/data/2025/2504_09xxx/2504.09130/images/b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e548434d36bd83227d255daa73210cbff716eee --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09130/images/b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b49dc8114db63463135c7cd924abc8b9f75c7a6c7c8036b8130934af4c9db27 +size 10596 diff --git a/data/2025/2504_09xxx/2504.09130/images/c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg b/data/2025/2504_09xxx/2504.09130/images/c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg new file mode 100644 index 0000000000000000000000000000000000000000..217f674d8db3322a6e09ae8c3b4497a680a2a9b7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d600a2e7c4e1b5eea71dd2df5d98c95d793765e21a1a172432e79cfdb7dc48a +size 10276 diff --git a/data/2025/2504_09xxx/2504.09130/images/cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg b/data/2025/2504_09xxx/2504.09130/images/cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c7a1d271af9e99b3451c041191ee1a8d9bffdce --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fed9d937576efec24b3364f2a3546c5ef16bb0506211b918e2812875b45161b +size 28624 diff --git a/data/2025/2504_09xxx/2504.09130/images/e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg b/data/2025/2504_09xxx/2504.09130/images/e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c81846309f98fcbbc66f08a1c39285a934cfeb81 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2a5ac123fba23cd6ed4c950fd333a82c287e91b523f8eb2df6c7a12e60273269 +size 28985 diff --git a/data/2025/2504_09xxx/2504.09130/images/fffc5b6abb65b39d44f783e6567ff333b981cb2198deaf81d309e62f10b77cef.jpg b/data/2025/2504_09xxx/2504.09130/images/fffc5b6abb65b39d44f783e6567ff333b981cb2198deaf81d309e62f10b77cef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eed5445de78dd06f4988fefea2d98cf078ac4a6c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/images/fffc5b6abb65b39d44f783e6567ff333b981cb2198deaf81d309e62f10b77cef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eac2ec9cd4efe6af468f9181e2c18e23101cf4baca6a3c08b5308d132a9413c +size 23226 diff --git a/data/2025/2504_09xxx/2504.09130/layout.json b/data/2025/2504_09xxx/2504.09130/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e6f6f603db7ccf10e7bb6eeec36ab382cd212610 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09130/layout.json @@ -0,0 +1,7431 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 75, + 525, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 75, + 525, + 94 + ], + "spans": [ + { + "bbox": [ + 69, + 75, + 525, + 94 + ], + "type": "text", + "content": "VisuoThink: Empowering LVLM Reasoning with Multimodal Tree Search" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "spans": [ + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": "Yikun Wang" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Siyin Wang" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 
+ ], + "type": "text", + "content": ", Qinyuan Cheng" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Zhaoye Fei" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Liang Ding" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Qipeng Guo" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{24}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Dacheng Tao" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "text", + "content": ", Xipeng Qiu" + }, + { + "bbox": [ + 95, + 110, + 498, + 139 + ], + "type": "inline_equation", + "content": "^{\\dagger 12}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "spans": [ + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "type": "text", + "content": " Fudan University " + }, + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 174, + 144, + 418, + 158 + ], + "type": "text", + "content": " Shanghai Innovation Institute" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 158, + 516, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 158, + 516, + 186 + ], + "spans": [ + { + "bbox": [ + 76, + 158, + 516, + 
186 + ], + "type": "text", + "content": "3 The University of Sydney 4 Shanghai AI Laboratory 5 Nanyang Technological University yikunwang19@fudan.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 246, + 274, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 246, + 274, + 556 + ], + "spans": [ + { + "bbox": [ + 84, + 246, + 274, + 556 + ], + "type": "text", + "content": "Recent advancements in Large Vision-Language Models have showcased remarkable capabilities. However, they often falter when confronted with complex reasoning tasks that humans typically address through visual aids and deliberate, step-by-step thinking. While existing methods have explored text-based slow thinking or rudimentary visual assistance, they fall short of capturing the intricate, interleaved nature of human visual-verbal reasoning processes. To overcome these limitations and inspired by the mechanisms of slow thinking in human cognition, we introduce VisuoThink, a novel framework that seamlessly integrates visuospatial and linguistic domains. VisuoThink facilitates multimodal slow thinking by enabling progressive visual-textual reasoning and incorporates test-time scaling through look-ahead tree search. Extensive experiments demonstrate that VisuoThink significantly enhances reasoning capabilities via inference-time scaling, even without fine-tuning, achieving state-of-the-art performance in tasks involving geometry and spatial reasoning. Our code has been open-sourced at https://github.com/ekonwang/VisuoThink." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 571, + 154, + 584 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 571, + 154, + 584 + ], + "spans": [ + { + "bbox": [ + 68, + 571, + 154, + 584 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 594, + 291, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 594, + 291, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 594, + 291, + 743 + ], + "type": "text", + "content": "Recent advances in Large Vision-Language Models (LVLMs) (OpenAI, 2024a; Team, 2024) have shown remarkable progress across a variety of tasks. However, these models often struggle with complex reasoning challenges, such as geometric problem-solving (Qiao et al., 2024; Cherian et al., 2024) or spatial reasoning (Ramakrishnan et al., 2024; Wu et al., 2024), where human problem-solving approaches typically rely on visual aids. For example, when solving geometry problems, humans often iteratively sketch auxiliary lines or" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 311, + 221, + 518, + 259 + ], + "blocks": [ + { + "bbox": [ + 311, + 221, + 518, + 259 + ], + "lines": [ + { + "bbox": [ + 311, + 221, + 518, + 259 + ], + "spans": [ + { + "bbox": [ + 311, + 221, + 518, + 259 + ], + "type": "image", + "image_path": "b71e7927fc17a4de44c8768ddd0efc75420e0d30f5c8c396b6e61ee8eb632908.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 311, + 262, + 518, + 307 + ], + "blocks": [ + { + "bbox": [ + 311, + 262, + 518, + 307 + ], + "lines": [ + { + "bbox": [ + 311, + 262, + 518, + 307 + ], + "spans": [ + { + "bbox": [ + 311, + 262, + 518, + 307 + ], + "type": "image", + "image_path": "c2f0c45a628a1f3c0208077c01253fb50db99a5005e6237aec7ea485fefab551.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 9 + }, + { + "type": "image", + "bbox": [ + 311, + 312, + 518, + 425 + ], + "blocks": [ + { + "bbox": [ + 311, + 312, + 518, + 425 + ], + "lines": [ + { + "bbox": [ + 311, + 312, + 518, + 425 + ], + "spans": [ + { + "bbox": [ + 311, + 312, + 518, + 425 + ], + "type": "image", + "image_path": "e6c77e8ae3dba5f734011284a7f9ec8b6a31587cd454c7757bc86e754e59371e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 436, + 526, + 532 + ], + "lines": [ + { + "bbox": [ + 302, + 436, + 526, + 532 + ], + "spans": [ + { + "bbox": [ + 302, + 436, + 526, + 532 + ], + "type": "text", + "content": "Figure 1: Illustration of Input-Output Prompting, CoT, Vision-aided Thought and our VisuoThink. Vision-aided Thought often relies on reasoning with one-step or unreliable multi-step visual cues (generated by LVLMs). While VisuoThink addresses this gap through tool-augmented visual hints, coupled with a predictive-rollout search mechanism to systematically optimize reasoning capability." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 543, + 525, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 543, + 525, + 597 + ], + "spans": [ + { + "bbox": [ + 302, + 543, + 525, + 597 + ], + "type": "text", + "content": "visualize intermediate steps, while exploring different reasoning paths - a form of \"slow thinking\" (Kahneman, 2011) that combines visual and verbal cognitive processes." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 598, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 598, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 598, + 525, + 775 + ], + "type": "text", + "content": "With the success of o1 series models (OpenAI, 2024b), researchers have explored language as a medium for implementing slow thinking, coupled with test-time scaling techniques (Zeng et al., 2024). Given the inherently multimodal nature of reality, early efforts (Xu et al., 2024; Thawakar et al., 2025; Yao et al., 2024; Du et al., 2025) have attempted to extend such deliberative thinking to multimodal reasoning. However, even augmented with search strategy, these methods treat visual information merely as static input, relying solely on textual reasoning chains during the reasoning process - creating a \"visual blind spot\", where the" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "spans": [ + { + "bbox": [ + 13, + 259, + 36, + 609 + ], + "type": "text", + "content": "arXiv:2504.09130v1 [cs.CL] 12 Apr 2025" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 81, + 751, + 217, + 763 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 751, + 217, + 763 + ], + "spans": [ + { + "bbox": [ + 81, + 751, + 217, + 763 + ], + "type": "text", + "content": "\\*Yikun and Siyin contributed equally" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 81, + 763, + 168, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 763, + 168, + 774 + ], + "spans": [ + { + "bbox": [ + 81, + 763, + 168, + 774 + ], + "type": "text", + "content": "† Corresponding Author" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + 
"bbox": [ + 67, + 71, + 291, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 232 + ], + "type": "text", + "content": "potential for visual information throughout the reasoning process is largely ignored (Fig. 1a). On the other hand, while approaches like VisualSketchpad (Hu et al., 2024) and VoT (Wu et al., 2024) have recognized the importance of visual information by incorporating visual aids in reasoning (Fig. 1b), they mainly focus on single-step assistance or simplified visual hints (e.g., emojis). These methods lack the multi-step visual-textual interleaved reasoning process that characterizes human slow thinking, while failing to explore potential search strategies." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 234, + 291, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 234, + 291, + 504 + ], + "spans": [ + { + "bbox": [ + 69, + 234, + 291, + 504 + ], + "type": "text", + "content": "To address these limitations, we propose VisuoThink, a multimodal tree search framework that systematically explores multiple reasoning paths with vision-text interleaved thinking at each step. Unlike previous approaches, Visuothink (Fig. 1c) enables multimodal slow thinking through two key innovations: (1) a step-by-step vision-text interleaved reasoning framework that dynamically utilizes multi-step visual aids from tool uses, and (2) a look-ahead tree search algorithm that explores multiple reasoning paths, enabling test-time scaling of the reasoning process. Specifically, our look-ahead tree search incorporates a predictive rollout mechanism that simulates the likely outcomes of different reasoning states. This allows the model to prioritize more promising paths and avoid less ones, guiding the reasoning process toward the optimal solution. 
Through this test-time scaling capability, the model can thoroughly explore and optimize reasoning paths dynamically during inference." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "text", + "content": "Our empirical evaluation demonstrates that Visuothink significantly outperforms existing methods across various reasoning tasks, particularly in geometry and spatial reasoning domains. On Geomeverse, Our methods achieves an accuracy@1 as high as " + }, + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "inline_equation", + "content": "48.5\\%" + }, + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "text", + "content": ", with an improvement of as high as " + }, + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "inline_equation", + "content": "21.8\\%" + }, + { + "bbox": [ + 67, + 505, + 291, + 666 + ], + "type": "text", + "content": " over the state-of-the-art baseline, which particularly shows strong performance of VisuoThink on problems requiring multi-step visual reasoning. Through extensive ablation studies, we show that each component of our framework contributes meaningfully to its overall performance." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 79, + 667, + 252, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 667, + 252, + 680 + ], + "spans": [ + { + "bbox": [ + 79, + 667, + 252, + 680 + ], + "type": "text", + "content": "In summary, our contributions include:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 686, + 291, + 775 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 81, + 686, + 290, + 755 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 686, + 290, + 755 + ], + "spans": [ + { + "bbox": [ + 81, + 686, + 290, + 755 + ], + "type": "text", + "content": "- We propose a novel reasoning paradigm, multimodal tree search, for multimodal slow thinking that enables dynamic integration of visual and verbal reasoning paths throughout the problem-solving search process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 761, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 761, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 81, + 761, + 291, + 775 + ], + "type": "text", + "content": "- We extend test-time scaling methods to the vi" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 71, + 525, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 71, + 525, + 113 + ], + "spans": [ + { + "bbox": [ + 324, + 71, + 525, + 113 + ], + "type": "text", + "content": "sual domain by proposing a predictive rollout mechanism that explores and optimizes visual reasoning paths by predicting future states." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 123, + 526, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 123, + 526, + 191 + ], + "spans": [ + { + "bbox": [ + 317, + 123, + 526, + 191 + ], + "type": "text", + "content": "- We demonstrate substantial empirical improvements across multiple reasoning tasks, particularly in geometry and spatial reasoning, with detailed analyses revealing key insights about our approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 216, + 396, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 216, + 396, + 228 + ], + "spans": [ + { + "bbox": [ + 303, + 216, + 396, + 228 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 238, + 489, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 238, + 489, + 252 + ], + "spans": [ + { + "bbox": [ + 303, + 238, + 489, + 252 + ], + "type": "text", + "content": "2.1 Text-centric Reasoning in LVLMs" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 257, + 526, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 257, + 526, + 420 + ], + "spans": [ + { + "bbox": [ + 302, + 257, + 526, + 420 + ], + "type": "text", + "content": "With the emergence of o1 models (OpenAI, 2024b), the importance of slow thinking has become increasingly evident (Zeng et al., 2024). Several works have attempted to extend this to LVLMs through methods like stage-wise reasoning (Xuet al., 2024), curriculum learning (Thawakar et al., 2025), tree search-based data generation (Yao et al., 2024), and LLM distillation (Du et al., 2025). However, these methods treat visual information as static input, relying only on textual data during reasoning, which limits their ability to fully leverage multimodal information for complex tasks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 431, + 441, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 431, + 441, + 444 + ], + "spans": [ + { + "bbox": [ + 303, + 431, + 441, + 444 + ], + "type": "text", + "content": "2.2 Vision-aided Reasoning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 449, + 525, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 449, + 525, + 652 + ], + "spans": [ + { + "bbox": [ + 302, + 449, + 525, + 652 + ], + "type": "text", + "content": "Recent advancements in multimodal reasoning have demonstrated that incorporating visual information provides richer context and hints compared to text-only approaches. Early studies adopted a two-stage approach, where visual information is first transformed and grounded into text (Zhang et al., 2023), graph structures (e.g., scene graphs (Mitra et al., 2023) or knowledge graphs (Mondal et al., 2024)), or bounding boxes (Lei et al., 2024), followed by reasoning. Other works leverage existing vision models (e.g., segmentation, detection) to process input images into valuable cues for perception, enabling more precise image-understanding with fine-grained visual information (Yang et al., 2023; Zhou et al., 2024; Gao et al., 2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 654, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 654, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 654, + 526, + 775 + ], + "type": "text", + "content": "Another sequence of research focuses on intermediate visual representations to enhance reasoning. For instance, Visual Sketchpad (Hu et al., 2024) employs Python-based drawing tools to generate sketches as intermediate visual aids for geometric problems, while VoT (Wu et al., 2024) formalizes visual thinking by generating emoji-like textual representations. 
MVOT (Li et al., 2025) fine-tunes multimodal models to generate images" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 72, + 522, + 232 + ], + "blocks": [ + { + "bbox": [ + 73, + 72, + 522, + 232 + ], + "lines": [ + { + "bbox": [ + 73, + 72, + 522, + 232 + ], + "spans": [ + { + "bbox": [ + 73, + 72, + 522, + 232 + ], + "type": "image", + "image_path": "29ed049967b9b04a6c7dcd056d68c10988ff9f39a5f29f8d545483f002908c51.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 243, + 526, + 292 + ], + "lines": [ + { + "bbox": [ + 67, + 243, + 526, + 292 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 526, + 292 + ], + "type": "text", + "content": "Figure 2: The illustration of our VisuoThink framework with three stages: (1) vision-text interleaved expansion: generates candidate paths through vision-text interleaved thinking; (2) rollout simulation: sample candidate reasoning nodes and then perform look-ahead search to better evaluate the value of current states; (3) selection: selects the most promising path via self-voting with results or states from rollout." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 312, + 291, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 291, + 446 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 291, + 446 + ], + "type": "text", + "content": "during reasoning, allowing the model to create visual aids dynamically. Despite these advancements, most existing methods rely on single-step or unreliable visual representations, lacking search mechanisms to test-time scaling through exploring multiple reasoning paths. 
In contrast, we develop a multimodal tree search framework that both leverages multi-step visual cues during reasoning and systematically explores reasoning paths through tree search." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 456, + 257, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 456, + 257, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 456, + 257, + 469 + ], + "type": "text", + "content": "2.3 Test-time Scaling with Tree Search" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 474, + 291, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 474, + 291, + 718 + ], + "spans": [ + { + "bbox": [ + 67, + 474, + 291, + 718 + ], + "type": "text", + "content": "Scaling compute at test time has emerged as a powerful strategy to enhance LLMs' reasoning capabilities without increasing model parameters (Snell et al., 2024). Various approaches including BoN (Gui et al., 2024; Sun et al., 2024; Amini et al., 2024), guided beam search (Xie et al., 2023; Yu et al., 2023), and Monte Carlo Tree Search (MCTS) (Feng et al., 2023; Liu et al., 2023; Chen et al., 2024) have been explored for text models, demonstrating improved performance through different search strategies. However, the exploration of test-time scaling in LVLMs remains limited. Prior work like AtomThink (Xiang et al., 2024) has only investigated basic methods such as beam search, with text-only reasoning chains. In contrast, our method introduces vision-text interleaved thinking with look-ahead search, extending test-time scaling to multimodal reasoning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 727, + 149, + 739 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 727, + 149, + 739 + ], + "spans": [ + { + "bbox": [ + 67, + 727, + 149, + 739 + ], + "type": "text", + "content": "3 VisuoThink" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "content": "We propose VisuoThink, a novel framework for multimodal reasoning that dynamically integrates" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 312, + 526, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 312, + 526, + 380 + ], + "spans": [ + { + "bbox": [ + 302, + 312, + 526, + 380 + ], + "type": "text", + "content": "visual and textual information during the inference process. At its core, our framework implements multimodal slow thinking through a key mechanism: predictive rollout search that allows models to think ahead." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 391, + 486, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 391, + 486, + 404 + ], + "spans": [ + { + "bbox": [ + 302, + 391, + 486, + 404 + ], + "type": "text", + "content": "3.1 Vision-Text Interleaved Thinking" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 409, + 526, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 409, + 526, + 639 + ], + "spans": [ + { + "bbox": [ + 302, + 409, + 526, + 639 + ], + "type": "text", + "content": "Our framework facilitates vision-text interleaved reasoning through an iterative cycle of Thought, Action, and Observation like existing work (Yao et al., 2023), which enables natural and dynamic interactions with external tools. 
(1) Thought phase: the model leverages visual information for textual reasoning (such as analyzing patterns based on previously added auxiliary lines) and determines the next step by planning what visual hints should be added to enhance understanding. (2) Action phase: the model executes the planned operations by calling external tools (like using Python code to draw auxiliary lines or highlight key features) to generate or modify visual information. (3) Observation phase: the model processes the visual feedback from the Action phase, incorporating these new visual hints into the next reasoning step." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": "The importance of visual information for LVLM reasoning is highlighted in VisuoThink, which utilize tool invocations to construct reliable visual hints step by step in a visual construction process. This tool-based design allows VisuoThink to flexibly adapt to various visual reasoning tasks. Moreover, unlike approaches (e.g. VisualSketchpad) that generate all visual aids at once, our step-by-step visual guidance naturally integrates with search techniques, enabling effective test-time scaling." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 214, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 214, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 214, + 83 + ], + "type": "text", + "content": "3.2 Predictive Rollout Search" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 291, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 291, + 250 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 291, + 250 + ], + "type": "text", + "content": "Based on tree search methods and inspired by MCTS, we propose a predictive rollout search mechanism that interleaves visual-text thinking. By anticipating the outcomes of intermediate states, the model can make timely corrections, enabling more accurate and powerful reasoning. As shown in Figure 2, at each reasoning step, our framework first generates multiple candidate paths through vision-text interleaved thinking, then simulates these paths to predict their outcomes, and finally selects the most promising path through a self-voting mechanism." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "content": "Vision-Text Interleaved Expansion In the whole reasoning chain " + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\{\\mathbf{a}_1, \\mathbf{a}_2, \\dots, \\mathbf{a}_t\\}" + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "content": ", given the current node " + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_{t-1}" + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "content": ", the model samples " + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "content": " candidate nodes " + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_t = \\{\\mathbf{s}_t^1, \\mathbf{s}_t^2, \\dots, \\mathbf{s}_t^k\\}" + }, + { + "bbox": [ + 67, + 255, + 291, + 377 + ], + "type": "text", + "content": ". Each candidate follows the vision-text interleaved thinking process described above, generating a sequence of Thought, Action, and Observation steps. This expansion creates a tree of possible reasoning paths, each representing a different problem-solving strategy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "text", + "content": "Rollout Simulation Visual reasoning often requires multiple steps to reach a conclusion, making it crucial to evaluate the full potential of each path. 
For each candidate node " + }, + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t^i" + }, + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "text", + "content": ", the model simulates the complete reasoning process to predict final outcomes " + }, + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_t^i" + }, + { + "bbox": [ + 67, + 381, + 291, + 515 + ], + "type": "text", + "content": ", rather than relying solely on immediate state evaluation. Different from expansion, the simulation extends each candidate node with a single path of vision-text interleaved thinking until reaching a final result." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 520, + 291, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 520, + 291, + 601 + ], + "spans": [ + { + "bbox": [ + 67, + 520, + 291, + 601 + ], + "type": "text", + "content": "Selection The selection of the optimal path is performed through a self-voting mechanism. The model considers the task description, historical nodes, and the simulated path with predicted results for each candidate node. 
The selection process can be formalized as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 98, + 608, + 290, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 608, + 290, + 628 + ], + "spans": [ + { + "bbox": [ + 98, + 608, + 290, + 628 + ], + "type": "interline_equation", + "content": "\\mathbf {S e l e c t} \\left(\\mathbf {S} _ {t}\\right) = \\underset {\\mathbf {s} _ {t} ^ {i} \\in \\mathbf {S} _ {t}} {\\arg \\max } \\mathbf {V o t e} \\left(\\mathbf {A} _ {t - 1}, \\mathbf {s} _ {t} ^ {i}, \\mathbf {r} _ {t} ^ {i}\\right) \\tag {1}", + "image_path": "19e64f65ee71978cf7c2743f225b1ee49725bfe66878d73eeb991ee47bf4ad4f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "inline_equation", + "content": "\\mathbf{A}_{t - 1}" + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "text", + "content": " represents the historical context, " + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t^i" + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "text", + "content": " for the candidate node, and " + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "inline_equation", + "content": "\\mathbf{r}_t^i" + }, + { + "bbox": [ + 67, + 635, + 291, + 717 + ], + "type": "text", + "content": " is the predicted result or final state. The Select is a heuristic function served by the LVLM model to guide the process. This selection ensures the model pursues the most promising reasoning strategy." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 726, + 270, + 740 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 726, + 270, + 740 + ], + "spans": [ + { + "bbox": [ + 67, + 726, + 270, + 740 + ], + "type": "text", + "content": "4 Solving Geometry with VisuoThink" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "The core of our methodology is rooted in multi-step visual information processing and search-based rea" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 126 + ], + "type": "text", + "content": "soning, enabling LVLMs to address strongly constrained mathematical problems (e.g., geometry challenges) and open-domain scenarios (such as visual navigation and visual tiling in section 5)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "spans": [ + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "content": "We formalize geometry problem-solving as a two-phase process integrating visual construction and algebraic computation. 
In Phase I, the model generates auxiliary lines defined by geometric constraints, such as connecting points " + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "inline_equation", + "content": "(x_{i},y_{i})" + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "inline_equation", + "content": "(x_{j},y_{j})" + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "content": ", construct a perpendicular or parallel line to form line segments " + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "inline_equation", + "content": "\\mathbf{L} = \\{l_i\\}" + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "content": ". This phase terminates with a AUX-END token, triggering Phase II, where geometric relationships are translated into solvable equations (e.g., " + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "inline_equation", + "content": "ax + b = 0" + }, + { + "bbox": [ + 302, + 126, + 526, + 275 + ], + "type": "text", + "content": ") through Python code execution." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "spans": [ + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": "Task Formulation LVLM should produce the reasoning trajectory consisting of reasoning steps " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\{\\mathbf{a}_t\\}" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": " that leads to the final result " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{r}" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": ", given the original problem " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": " while taking into account the auxiliary lines " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{L}" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": ". 
The framework operates under a constraint " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\sum_{t=1}^{|A|} \\| \\mathbf{a}_t \\| \\leq \\tau" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_t" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": " denotes visual-textual reasoning steps and " + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 302, + 280, + 526, + 391 + ], + "type": "text", + "content": " is the maximum step limit:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 312, + 410, + 525, + 440 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 410, + 525, + 440 + ], + "spans": [ + { + "bbox": [ + 312, + 410, + 525, + 440 + ], + "type": "interline_equation", + "content": "\\mathbf {A} \\sim \\mathcal {P} \\left(\\left\\{\\mathbf {a} _ {1}, \\dots , \\mathbf {a} _ {| A |}, \\mathbf {r} \\right\\} \\mid \\mathbf {Q}, \\mathbf {L}\\right) \\text {s . t .} \\sum_ {t = 1} ^ {| \\mathbf {A} |} \\| \\mathbf {a} _ {i} \\| \\leq \\tau \\tag {2}", + "image_path": "99750640e65a54fd353d13a477f47d9eb452145299e0e8cc39dc95e23dfbf8b8.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 452, + 525, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 452, + 525, + 506 + ], + "spans": [ + { + "bbox": [ + 302, + 452, + 525, + 506 + ], + "type": "text", + "content": "This formulation mirrors human problem-solving by decomposing proofs into executable visual-textual steps, validated via coordinate-based tools like matplotlib and equation solver." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 512, + 526, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 512, + 526, + 661 + ], + "spans": [ + { + "bbox": [ + 302, + 512, + 526, + 661 + ], + "type": "text", + "content": "Visual Construction We emphasize the criticality of incremental visual information for accurate solutions, where multi-step graphical representations originate from the progressive construction of auxiliary lines. This multi-stage approach facilitates search algorithm-enhanced refinement of auxiliary line generation, significantly improving LVLM capabilities in geometric reasoning. Consistent with Sketchpad methodology, we exclusively utilize common Python libraries (e.g., matplotlib) for diagram rendering." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 666, + 526, + 775 + ], + "type": "text", + "content": "Algebraic Computation Unlike general tasks, solving geometry problems cannot rely solely on visual construction or the model's inherent capabilities; instead, it necessitates the use of computational tools to achieve precise and accurate results. This requirement stems from the need for exact numerical solutions and the mitigation of potential errors in geometric reasoning. Through systematic" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 94, + 68, + 500, + 196 + ], + "blocks": [ + { + "bbox": [ + 94, + 68, + 500, + 196 + ], + "lines": [ + { + "bbox": [ + 94, + 68, + 500, + 196 + ], + "spans": [ + { + "bbox": [ + 94, + 68, + 500, + 196 + ], + "type": "table", + "html": "
ModelGPT-4oQwen2-VL-72B-InstructClaude-3.5-sonnet
Geomverse-109CoT11.15.614.4
VisualSketchpad8.96.716.7
VisualSketchpad + Equation Solver13.311.117.8
VisuoThink w/o rollout search (ours)24.419.026.7
VisuoThink (ours)28.925.627.8
Geometry3K(Lu et al., 2021)CoT20.818.837.5
VisualSketchPad22.917.039.6
VisualSketchpad + Equation Solver25.014.941.7
VisuoThink w/o rollout search (ours)27.120.837.5
VisuoThink (ours)33.325.043.8
", + "image_path": "8beb56cc31f29e1a3c4943714634b9fb69453ae0a5a20d7250521aaf41b59cc2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 82, + 264, + 511, + 456 + ], + "blocks": [ + { + "bbox": [ + 67, + 205, + 526, + 255 + ], + "lines": [ + { + "bbox": [ + 67, + 205, + 526, + 255 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 526, + 255 + ], + "type": "text", + "content": "Table 1: The 1-shot benchmark results (Accuracy@1) on Geometry including Geomverse-109 and Geometry3k of SOTA large visual language models. For GPT-4o and Claude-3.5-sonnet, we employ newest cutoffs (gpt-4o-2024-11-20 and claude-3-5-sonnet-20241022) separately. The gray part indicates results from VisuoThink and bold results represent the best performance." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 82, + 264, + 511, + 456 + ], + "lines": [ + { + "bbox": [ + 82, + 264, + 511, + 456 + ], + "spans": [ + { + "bbox": [ + 82, + 264, + 511, + 456 + ], + "type": "table", + "html": "
ModelDatasetVisual NavigationVisual Tiling
Subset (Num. Samples)level-3 (16)level-4 (31)level-5 (62)level-2 (119)
GPT-4oCoT18.83.20.00.8
VoT25.00.00.01.7
VoT + Executor62.59.74.812.6
VisuoThink w/o rollout search (ours)81.232.311.319.3
VisuoThink (ours)93.861.319.451.2
Qwen2-VL-72B-InstructCoT6.73.2-0.0
VoT0.00.0-0.8
VoT + Executor25.03.2-6.7
VisuoThink w/o rollout search (ours)50.06.5-9.2
VisuoThink (ours)81.312.9-20.2
Claude-3.5-sonnetCoT37.53.20.00.8
VoT56.30.00.02.5
VoT + Executor68.822.616.110.1
VisuoThink w/o rollout search (ours)81.238.741.980.7
VisuoThink (ours)93.861.353.284.0
", + "image_path": "7ec840b1973798c457f4be3a21bf34893e11feae173b9b3dfa750a44bdc86c48.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 465, + 525, + 526 + ], + "lines": [ + { + "bbox": [ + 67, + 465, + 525, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 465, + 525, + 526 + ], + "type": "text", + "content": "Table 2: The Pass@1 performance comparison on spatial reasoning benchmarks including Visual Navigation and Visual Tiling across SOTA LVLMs. The gray part indicates results from VisuoThink and bold results represent the best performance. The results of Qwen2-VL-72B-Instruct on Visual Navigation (" + }, + { + "bbox": [ + 67, + 465, + 525, + 526 + ], + "type": "inline_equation", + "content": "k = 5" + }, + { + "bbox": [ + 67, + 465, + 525, + 526 + ], + "type": "text", + "content": ") are masked out due to its restrained performance on the subset. The results from VoT with Executor are also reported, where the models utilize the unreliable visual hints generated by themselves rather than executor, consistent with the VoT framework." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 539, + 291, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 291, + 647 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 291, + 647 + ], + "type": "text", + "content": "integration, like VPD (Zhao et al., 2023), and VisualStechpad (Hu et al., 2024), phase II employs Python code execution for precise computation to mitigate LVLM hallucination risks. Furthermore, the model constructs single-variable algebraic equations based on identified geometric relationships, subsequently invoking equation solvers for numerical resolution." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 660, + 180, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 660, + 180, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 660, + 180, + 673 + ], + "type": "text", + "content": "4.1 Empirical Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 291, + 775 + ], + "type": "text", + "content": "Setup We conduct comprehensive evaluations on the challenging Geometry3K and Geomverse-109 datasets to demonstrate the methodological superiority. Especially we detail the trajectory of Geomverse-109 dataset synthesis in appendix E. SOTA closed-source models including gpt-4o2024-11-20 and claude-3-5-sonnet-20241022 are" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 539, + 526, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 539, + 526, + 623 + ], + "spans": [ + { + "bbox": [ + 302, + 539, + 526, + 623 + ], + "type": "text", + "content": "leveraged for inference. To ensure architectural diversity, open-source model (e.g., Qwen2-VL-72B) were incorporated; however, smaller-parameter open-source variants were excluded due to their capability constraints. And we detail the model and algorithm hyperparameters in appendix D." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": "Analysis Our empirical results reveal that, even without rollout search augmentation, our strategy substantially enhances LVLM reasoning capabilities compared to Chain-of-Thought (CoT) (MiTRA et al., 2023) and Visual Sketchpad (Hu et al., 2024) baselines. 
Notably, on the Geomverse-109 (Kazemi et al., 2023) benchmark, VisuoThink outperforms CoT and Visual Sketchpad by an average of " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "17.1\\%" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "inline_equation", + "content": "16.7\\%" + }, + { + "bbox": [ + 302, + 640, + 527, + 775 + ], + "type": "text", + "content": " across all evaluated models, and predictive rollout search further" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 70, + 523, + 247 + ], + "blocks": [ + { + "bbox": [ + 69, + 70, + 523, + 247 + ], + "lines": [ + { + "bbox": [ + 69, + 70, + 523, + 247 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 523, + 247 + ], + "type": "image", + "image_path": "20b92721a14115ff4a0cef767e26085517041cdebeaa23a8db3c478939d7fe61.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 257, + 525, + 295 + ], + "lines": [ + { + "bbox": [ + 67, + 257, + 525, + 295 + ], + "spans": [ + { + "bbox": [ + 67, + 257, + 525, + 295 + ], + "type": "text", + "content": "Figure 3: The illustration of spatial reasoning tasks derived from VoT (Wu et al., 2024), including Visual Navigation and Visual Tiling. LVLM is required to execute a sequence of actions to complete certain goals. Our experimental setting makes them much more challenging and closer to real-environment deployment." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "text", + "content": "enhances models' performance by an average of " + }, + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "text", + "content": ". Also, the employment of equation solver on Visual Sketchpad also increases an average performance of " + }, + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "inline_equation", + "content": "3.3\\%" + }, + { + "bbox": [ + 67, + 308, + 291, + 470 + ], + "type": "text", + "content": ". This performance gap likely stems from Geomverse's emphasis on geometric relationship construction, where our equation-solving framework helps to accurately get intermediate answers and enables efficient resolution of structurally complex problems. The systematic integration of geometric analysis tools further mitigates error propagation inherent in conventional LVLM reasoning baselines." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 483, + 270, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 483, + 270, + 497 + ], + "spans": [ + { + "bbox": [ + 67, + 483, + 270, + 497 + ], + "type": "text", + "content": "5 Spatial Reasoning with VisuoThink" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 507, + 291, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 291, + 642 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 291, + 642 + ], + "type": "text", + "content": "Spatial reasoning, defined as the cognitive capability to interpret spatial object relationships, motion dynamics, and environmental interactions, constitutes a foundational requirement for mission-critical applications such as robotic systems, autonomous navigation, and augmented reality. These domains demand robust integration of visual perception and precise manipulation of spatial-temporal constraints for optimal action planning." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 653, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 653, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 653, + 291, + 775 + ], + "type": "text", + "content": "Task Formulation Building upon the Visualization of Thought (VoT) (Wu et al., 2024) benchmarks, we design two challenging spatial reasoning benchmarks with enhanced complexity as shown in figure 3: Visual Navigation and Visual Tiling. We provide detailed materials of the differences between the original VoT benchmark setup and our experimental configuration in Appendix B and additionally provide the mathematical task formu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 308, + 397, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 308, + 397, + 321 + ], + "spans": [ + { + "bbox": [ + 302, + 308, + 397, + 321 + ], + "type": "text", + "content": "lation in appendix C." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 328, + 526, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 328, + 526, + 532 + ], + "spans": [ + { + "bbox": [ + 302, + 328, + 526, + 532 + ], + "type": "text", + "content": "Visual Construction via Executor During task execution, robots deployed in true environments typically receive environmental feedback following each action, which facilitates perception and subsequent decision-making processes. In our methodology, we leverage environmental interaction tools to enhance the model's spatial reasoning capabilities. In each action, we employ an executor to implement the corresponding action, and return textual execution feedback and visuospatial hint (optional) representing the map state. In the context of (1) Visual Navigation, the visual feedback corresponds to the map including agent's current position; while in (2) Visual Tiling scenarios, it represents the current state of rectangle occupation patterns." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 541, + 414, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 541, + 414, + 554 + ], + "spans": [ + { + "bbox": [ + 302, + 541, + 414, + 554 + ], + "type": "text", + "content": "5.1 Empirical Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "text", + "content": "Setup We evaluate our framework on two spatial reasoning benchmarks: Visual Navigation and Visual Tiling. 
For Visual Navigation, we create three difficulty levels with increasing map complexity, where the level indicates the " + }, + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "text", + "content": " for Visual Navigation as shown in table 2. For Visual Tiling, we focus on level-2 (i.e. " + }, + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "inline_equation", + "content": "k = 2" + }, + { + "bbox": [ + 302, + 558, + 526, + 775 + ], + "type": "text", + "content": ") problems with 119 samples. We compare our method against Chain-of-Thought (CoT), Visualization of Thought (VoT) (Wu et al., 2024). As table 2 indicates, the results from VoT with tool interactions (i.e. Executor) are also reported, where textual feedbacks are employed but the visual hints are still generated by the model rather from executor, consistent with the VoT framework. The source of visual hints distinguishes it from our method. We employ the same" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 89, + 71, + 295, + 229 + ], + "blocks": [ + { + "bbox": [ + 89, + 71, + 295, + 229 + ], + "lines": [ + { + "bbox": [ + 89, + 71, + 295, + 229 + ], + "spans": [ + { + "bbox": [ + 89, + 71, + 295, + 229 + ], + "type": "image", + "image_path": "6d288c79a237e2ab479ac33116fe8a946a09484b0885513867ef20b7464ff990.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 245, + 525, + 317 + ], + "lines": [ + { + "bbox": [ + 66, + 245, + 525, + 317 + ], + "spans": [ + { + "bbox": [ + 66, + 245, + 525, + 317 + ], + "type": "text", + "content": "Figure 4: (LEFT) The trend of Pass@1 rate on Visual Navigation as the number of reasoning steps increases. 
(RIGHT) The relationship between the Accuracy@1 on geometry problems (Geomverse) and tree width for rollout search. We observe that LVLMs significantly benefit from longer reasoning chains, although the effect plateaus rapidly beyond a certain threshold of reasoning steps. The relationship between performance and tree width exhibits a more complex pattern, demonstrating an inverted U-shaped trend with both GPT-4o and Claude-3.5-Sonnet." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 296, + 70, + 502, + 229 + ], + "blocks": [ + { + "bbox": [ + 296, + 70, + 502, + 229 + ], + "lines": [ + { + "bbox": [ + 296, + 70, + 502, + 229 + ], + "spans": [ + { + "bbox": [ + 296, + 70, + 502, + 229 + ], + "type": "image", + "image_path": "9431609d3275408fd7e42656f19f0a2fd6844e082a9d6e1281f04a2478c4525c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 328, + 290, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 328, + 290, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 328, + 290, + 354 + ], + "type": "text", + "content": "temperature and VisuoThink hyperparameters as section 4.1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "spans": [ + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "text", + "content": "Analysis In spatial reasoning experiments, VisuoThink demonstrates significant performance improvements over baseline methods, particularly when augmented with predictive rollout search. As shown in Table 2, VisuoThink achieves the highest accuracy across all tasks, outperforming both CoT and VoT baselines. 
For instance, on the Visual Navigation task, VisuoThink on GPT-4o achieves a " + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "inline_equation", + "content": "93.8\\%" + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "text", + "content": " accuracy at level-3, compared to " + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "inline_equation", + "content": "62.5\\%" + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "text", + "content": " for VoT with an executor and " + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "inline_equation", + "content": "18.8\\%" + }, + { + "bbox": [ + 67, + 368, + 291, + 556 + ], + "type": "text", + "content": " for CoT. This trend is consistent across different model architectures, including GPT-4o, Qwen2-VL-72B-Instruct, and Claude-3.5-sonnet, highlighting the robustness of our approach." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "content": "Similar to the geometry experiments in Section 4, the integration of tool interactions and multi-step visual reasoning plays a critical role in enhancing performance. The executor's feedback mechanism, which provides visual updates after each action, mirrors the incremental visual refinement seen in geometry tasks, where auxiliary lines are progressively constructed. For instance, VisuoThink without rollout search demonstrates an average improvement of " + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "inline_equation", + "content": "34.7\\%" + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "content": " on Visual Tiling across diverse models. 
We observe that while VoT augmented with textual feedback achieves an average increase of " + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "inline_equation", + "content": "8.1\\%" + }, + { + "bbox": [ + 67, + 558, + 291, + 775 + ], + "type": "text", + "content": ", its performance gain is notably less pronounced compared to VisuoThink without rollout search. This underscores the critical role of reliable visual cues in enhancing reasoning capa" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 328, + 525, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 328, + 525, + 368 + ], + "spans": [ + { + "bbox": [ + 302, + 328, + 525, + 368 + ], + "type": "text", + "content": "bilities. The dynamic interaction allows the model to iteratively refine its reasoning path, leading to more accurate solutions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 381, + 379, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 381, + 379, + 394 + ], + "spans": [ + { + "bbox": [ + 302, + 381, + 379, + 394 + ], + "type": "text", + "content": "6 Discussion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 405, + 526, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 405, + 526, + 513 + ], + "spans": [ + { + "bbox": [ + 302, + 405, + 526, + 513 + ], + "type": "text", + "content": "In this section, we analyze key aspects of VisuoThink's performance. We examine how the length of reasoning chain affects spatial reasoning, the impact of child node expansion in rollout search, and the influence of supervision levels in predictive rollouts across tasks. These insights highlight VisuoThink's effectiveness and suggest future directions for multimodal reasoning frameworks." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 525, + 514, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 525, + 514, + 552 + ], + "spans": [ + { + "bbox": [ + 302, + 525, + 514, + 552 + ], + "type": "text", + "content": "6.1 Could Longer Reasoning Chains Assist LVLMs in Reasoning?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 557, + 526, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 557, + 526, + 747 + ], + "spans": [ + { + "bbox": [ + 301, + 557, + 526, + 747 + ], + "type": "text", + "content": "In practical applications of " + }, + { + "bbox": [ + 301, + 557, + 526, + 747 + ], + "type": "inline_equation", + "content": "LVLMs" + }, + { + "bbox": [ + 301, + 557, + 526, + 747 + ], + "type": "text", + "content": " for spatial reasoning tasks, each tool invocation can be seen as an agent attempting an action in the environment and receiving feedback. Although many attempts may be inaccurate, allowing the model more trial-and-error opportunities before achieving the final goal could potentially enhance its reasoning capabilities. By setting different upper limits on the number of reasoning steps in visual navigation tasks, we observe a positive correlation between the number of reasoning steps and the model's task completion rate. This suggests that the model indeed benefits from more tool invocations and longer reasoning." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "content": "However, as the number of reasoning steps increases, the completion rate gradually converges," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": "making further significant improvements challenging. As shown in figure 4 (left), for instance, increasing reasoning steps from 10 to 20 resulted in substantial performance gains " + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "inline_equation", + "content": "(+54.1\\%" + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "inline_equation", + "content": "+48.4\\%)" + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": " across different LVLM architectures (GPT-4o and Claude-3.5-sonnet). However, when reasoning steps were increased from 20 to 40, the performance growth slowed dramatically, dropping to " + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "inline_equation", + "content": "+6.5\\%" + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "inline_equation", + "content": "+2.1\\%" + }, + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": ", respectively. 
This phenomenon aligns with expectations, as merely increasing the number of tool invocations does not enable the model to better solve the most challenging samples. This underscores the necessity of techniques like rollout search within the broader context of test scaling." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 312, + 258, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 312, + 258, + 338 + ], + "spans": [ + { + "bbox": [ + 67, + 312, + 258, + 338 + ], + "type": "text", + "content": "6.2 Could Larger Tree Span Enhances VisuoThink's Performance?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 360, + 291, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 360, + 291, + 537 + ], + "spans": [ + { + "bbox": [ + 67, + 360, + 291, + 537 + ], + "type": "text", + "content": "Predictive rollouts enhance the model's reasoning capabilities, which can be viewed as a tangible outcome of successfully expanding the model's reasoning search space. A natural question arises: Can we further improve the model's reasoning performance on benchmarks simply by increasing the number of candidate child nodes at each selection step, i.e., expanding the tree width, thereby enhancing model's reasoning capability? To investigate this, we conducted comparative experiments on geometry tasks using GPT-4o and Claude-3.5-sonnet, keeping the depth of the reasoning tree constant while varying the number of candidate child nodes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": "As presented in figure 4 (right), we observed an inverted U-shaped trend in overall performance as the number of candidate tree nodes increased across different model architectures. 
Notably, when the number of candidate child nodes equals 1, the model follows a single reasoning path, effectively bypassing predictive rollout search. Contrary to expectations, the performance trend initially rises and then declines. This counterintuitive result can be attributed to the inherent errors in the model's evaluation of child nodes. Simply and aggressively increasing the tree width leads to confusion in selecting child nodes, which in turn reduces overall reasoning efficiency. Thus, an interesting conclusion emerges: we cannot expect to continuously improve model performance by merely increasing the number of child nodes in rollout search." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 311, + 71, + 511, + 227 + ], + "blocks": [ + { + "bbox": [ + 311, + 71, + 511, + 227 + ], + "lines": [ + { + "bbox": [ + 311, + 71, + 511, + 227 + ], + "spans": [ + { + "bbox": [ + 311, + 71, + 511, + 227 + ], + "type": "image", + "image_path": "cd4ff9015c99503804b28e55daf60fd50c95fbfc7ca470586add4f184fcd4796.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 242, + 526, + 292 + ], + "lines": [ + { + "bbox": [ + 302, + 242, + 526, + 292 + ], + "spans": [ + { + "bbox": [ + 302, + 242, + 526, + 292 + ], + "type": "text", + "content": "Figure 5: The performance gain " + }, + { + "bbox": [ + 302, + 242, + 526, + 292 + ], + "type": "inline_equation", + "content": "(+ \\%)" + }, + { + "bbox": [ + 302, + 242, + 526, + 292 + ], + "type": "text", + "content": " on tasks through predictive rollout search. The performance gain is calculated via the performance gap between VisuoThink (w/o rollout search) and VisuoThink." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 305, + 525, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 305, + 525, + 331 + ], + "spans": [ + { + "bbox": [ + 302, + 305, + 525, + 331 + ], + "type": "text", + "content": "6.3 Strong v.s. Weak Supervision in Predictive Rollout Search" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 301, + 336, + 525, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 336, + 525, + 498 + ], + "spans": [ + { + "bbox": [ + 301, + 336, + 525, + 498 + ], + "type": "text", + "content": "An intriguing observation is that the strength of guidance provided by predictive rollout results varies between geometry and spatial reasoning tasks. In geometry tasks, the model only receives the final numerical results of the problem, whereas in spatial reasoning tasks, the model has access to visual states of stronger supervision (e.g., the agent's final position, the position of the destination, etc.). In other words, predictive rollouts in geometry tasks offer weaker supervision, while those in spatial reasoning tasks provide stronger supervision." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 499, + 526, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 499, + 526, + 716 + ], + "spans": [ + { + "bbox": [ + 302, + 499, + 526, + 716 + ], + "type": "text", + "content": "This observation aligns with the findings of the Deepseek R1 report, which highlights that outcome-based supervision in RL can significantly enhance Deepseek-R1-Zero's reasoning capabilities (DeepSeek-AI, 2025). The effectiveness of such supervision stems from its strong supervisory signal, and predictive rollouts with strong supervision are more effective in improving model reasoning performance. 
This is further supported by our experimental results, as illustrated in figure 5, where predictive rollouts demonstrated more substantial performance gains in spatial reasoning tasks compared to geometry tasks, across both open-source and closed-source models. The detailed performance gain results are presented in appendix A." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 726, + 381, + 739 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 726, + 381, + 739 + ], + "spans": [ + { + "bbox": [ + 303, + 726, + 381, + 739 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 748, + 525, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 776 + ], + "type": "text", + "content": "We present VisuoThink, a multimodal tree search framework enhancing LVLM reasoning through" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 222 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 222 + ], + "type": "text", + "content": "dynamic visual-textual interleaving and predictive rollout search. Our approach demonstrates significant improvements across geometry and spatial reasoning tasks without requiring model fine-tuning. Empirical results show substantial performance gains on geometry and spatial reasoning benchmarks. Our analysis reveals key insights about tool interaction benefits, search space optimization, and supervision strength in multimodal reasoning. These findings open new possibilities for advancing LVLM capabilities in complex reasoning tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 230, + 131, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 230, + 131, + 243 + ], + "spans": [ + { + "bbox": [ + 68, + 230, + 131, + 243 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 251, + 291, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 291, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 291, + 441 + ], + "type": "text", + "content": "Despite its strong performance, VisuoThink has several limitations. First, the predictive rollout search process introduces significant computational overhead, making it potentially impractical for real-time applications. Second, our approach particularly relies on tool interactions for stronger capability, which may require more effort in some specific deployment environments. Third, the framework's effectiveness is constrained by the quality of the base VLM's reasoning capabilities - while it enhances performance, it cannot overcome fundamental model limitations. Finally, our evaluation focuses primarily on geometric and spatial reasoning tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 450, + 269, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 450, + 269, + 464 + ], + "spans": [ + { + "bbox": [ + 68, + 450, + 269, + 464 + ], + "type": "text", + "content": "Ethics and Reproducibility Statements" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "type": "text", + "content": "Ethics We take ethical considerations very seriously and strictly adhere to the ACL Ethics Policy. This paper proposes a test-time slow-thinking framework to improve the multimodal reasoning ability of current LVLMs. 
All evaluation datasets used in this paper will be publicly available or have been widely adopted by researchers. Thus, we believe that this research will not pose ethical issues." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 587, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 587, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 587, + 291, + 682 + ], + "type": "text", + "content": "Reproducibility In this paper, we discuss the detailed experimental setup, such as hyperparameters, implementation of algorithm, and statistic descriptions. More importantly, we will open source our code and data in the future to help reproduce the experimental results of this paper." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "spans": [ + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 91, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 304, + 91, + 525, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 91, + 525, + 115 + ], + "spans": [ + { + "bbox": [ + 304, + 91, + 525, + 115 + ], + "type": "text", + "content": "Afra Amini, Tim Vieira, and Ryan Cotterell. 2024. Variational best-of-n alignment. *ArXiv*, abs/2407.06057." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 124, + 526, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 124, + 526, + 158 + ], + "spans": [ + { + "bbox": [ + 304, + 124, + 526, + 158 + ], + "type": "text", + "content": "Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. 2024. Alphamath almost zero: process supervision without process. ArXiv, abs/2405.03553." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 168, + 526, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 168, + 526, + 225 + ], + "spans": [ + { + "bbox": [ + 304, + 168, + 526, + 225 + ], + "type": "text", + "content": "Anoop Cherian, Kuan-Chuan Peng, Suhas Lohit, Joanna Matthiesen, Kevin Smith, and Joshua B Tenenbaum. 2024. Evaluating large vision-and-language models on children's mathematical olympiads. arXiv preprint arXiv:2406.15736." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 234, + 526, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 234, + 526, + 269 + ], + "spans": [ + { + "bbox": [ + 304, + 234, + 526, + 269 + ], + "type": "text", + "content": "DeepSeek-AI. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Preprint, arXiv:2501.12948." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 278, + 526, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 278, + 526, + 324 + ], + "spans": [ + { + "bbox": [ + 304, + 278, + 526, + 324 + ], + "type": "text", + "content": "Yifan Du, Zikang Liu, Yifan Li, Wayne Xin Zhao, Yuqi Huo, Bingning Wang, Weipeng Chen, Zheng Liu, Zhongyuan Wang, and Jiahui Wen. 2025. Virgo: A preliminary exploration on reproducing o1-like mllm." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 333, + 526, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 526, + 379 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 526, + 379 + ], + "type": "text", + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. ArXiv, abs/2309.17179." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 388, + 526, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 526, + 444 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 526, + 444 + ], + "type": "text", + "content": "Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, and Rongrong Ji. 2024. Cantor: Inspiring multimodal chain-of-thought of mllm. ArXiv, abs/2404.16033." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 454, + 526, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 454, + 526, + 499 + ], + "spans": [ + { + "bbox": [ + 304, + 454, + 526, + 499 + ], + "type": "text", + "content": "Lin Gui, Cristina Garbacea, and Victor Veitch. 2024. Bonbon alignment for large language models and the sweetness of best-of-n sampling. ArXiv, abs/2406.00832." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 509, + 526, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 509, + 526, + 565 + ], + "spans": [ + { + "bbox": [ + 304, + 509, + 526, + 565 + ], + "type": "text", + "content": "Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke S. Zettlemoyer, Noah A. Smith, and Ranjay Krishna. 2024. Visual sketchpad: Sketching as a visual chain of thought for multimodal language models. ArXiv, abs/2406.09403." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 575, + 526, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 575, + 526, + 598 + ], + "spans": [ + { + "bbox": [ + 304, + 575, + 526, + 598 + ], + "type": "text", + "content": "Daniel Kahneman. 2011. Thinking, fast and slow. Farrar, Straus and Giroux." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 608, + 526, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 608, + 526, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 608, + 526, + 654 + ], + "type": "text", + "content": "Mehran Kazemi, Hamidreza Alvari, Ankit Anand, Jialin Wu, Xi Chen, and Radu Soricut. 2023. Geomverse: A systematic evaluation of large models for geometric reasoning. Preprint, arXiv:2312.12241." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 663, + 526, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 663, + 526, + 719 + ], + "spans": [ + { + "bbox": [ + 304, + 663, + 526, + 719 + ], + "type": "text", + "content": "Xuanyu Lei, Zonghan Yang, Xinrui Chen, Peng Li, and Yang Liu. 2024. Scaffolding coordinates to promote vision-language coordination in large multi-modal models. In International Conference on Computational Linguistics." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 526, + 775 + ], + "type": "text", + "content": "Chengzu Li, Wenshan Wu, Huanyu Zhang, Yan Xia, Shaoguang Mao, Li Dong, Ivan Vuli'c, and Furu Wei. 2025. Imagine while reasoning in space: Multimodal visualization-of-thought." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 774 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "type": "text", + "content": "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. 2023. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 136, + 289, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 136, + 289, + 190 + ], + "spans": [ + { + "bbox": [ + 69, + 136, + 289, + 190 + ], + "type": "text", + "content": "Pan Lu, Ran Gong, Shibiao Jiang, Liang Qiu, Siyuan Huang, Xiaodan Liang, and Song-Chun Zhu. 2021. Inter-gps: Interpretable geometry problem solving with formal language and symbolic reasoning. Preprint, arXiv:2105.04165." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 199, + 289, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 199, + 289, + 254 + ], + "spans": [ + { + "bbox": [ + 69, + 199, + 289, + 254 + ], + "type": "text", + "content": "Chancharik Mitra, Brandon Huang, Trevor Darrell, and Roei Herzig. 2023. Compositional chain-of-thought prompting for large multimodal models. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14420-14431." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 261, + 289, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 261, + 289, + 317 + ], + "spans": [ + { + "bbox": [ + 69, + 261, + 289, + 317 + ], + "type": "text", + "content": "Debjyoti Mondal, Suraj Modi, Subhadarshi Panda, Ritraj Singh, and Godawari Sudhakar Rao. 2024. Kamcot: Knowledge augmented multimodal chain-of-thoughts reasoning. In AAAI Conference on Artificial Intelligence." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 324, + 289, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 324, + 289, + 346 + ], + "spans": [ + { + "bbox": [ + 69, + 324, + 289, + 346 + ], + "type": "text", + "content": "OpenAI. 2024a. Gpt-4o system card. Preprint, arXiv:2410.21276." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 354, + 255, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 255, + 365 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 255, + 365 + ], + "type": "text", + "content": "OpenAI. 2024b. Learning to reason with llms." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 373, + 289, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 373, + 289, + 439 + ], + "spans": [ + { + "bbox": [ + 69, + 373, + 289, + 439 + ], + "type": "text", + "content": "Runqi Qiao, Qiuna Tan, Guanting Dong, Minhui Wu, Chong Sun, Xiaoshuai Song, Zhuoma GongQue, Shanglin Lei, Zhe Wei, Miaoxuan Zhang, and 1 others. 2024. We-math: Does your large multimodal model achieve human-like mathematical reasoning? arXiv preprint arXiv:2407.01284." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 448, + 289, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 448, + 289, + 491 + ], + "spans": [ + { + "bbox": [ + 69, + 448, + 289, + 491 + ], + "type": "text", + "content": "Santhosh Kumar Ramakrishnan, Erik Wijmans, Philipp Kraehenbuehl, and Vladlen Koltun. 2024. Does spatial cognition emerge in frontier models? Preprint, arXiv:2410.06468." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 499, + 289, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 499, + 289, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 499, + 289, + 544 + ], + "type": "text", + "content": "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Preprint, arXiv:2303.11366." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 551, + 289, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 551, + 289, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 551, + 289, + 595 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. ArXiv, abs/2408.03314." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 603, + 289, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 603, + 289, + 648 + ], + "spans": [ + { + "bbox": [ + 69, + 603, + 289, + 648 + ], + "type": "text", + "content": "Hanshi Sun, Momin Haider, Ruiqi Zhang, Huitao Yang, Jiahao Qiu, Ming Yin, Mengdi Wang, Peter Bartlett, and Andrea Zanette. 2024. Fast best-of-n decoding via speculative rejection. ArXiv, abs/2410.20290." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 655, + 289, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 289, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 289, + 689 + ], + "type": "text", + "content": "Gemini Team. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 697, + 289, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 697, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 697, + 289, + 774 + ], + "type": "text", + "content": "Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, Hisham Cholakkal, Ivan Laptev, Mubarak Shah, Fahad Shahbaz Khan, and Salman H. Khan. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 72, + 525, + 761 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 305, + 72, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 524, + 116 + ], + "type": "text", + "content": "Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. 2024. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 125, + 525, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 125, + 525, + 190 + ], + "spans": [ + { + "bbox": [ + 305, + 125, + 525, + 190 + ], + "type": "text", + "content": "Kun Xiang, Zhili Liu, Zihao Jiang, Yunshuang Nie, Runhui Huang, Haoxiang Fan, Hanhui Li, Weiran Huang, Yihan Zeng, Jianhua Han, Lanqing Hong, Hang Xu, and Xiaodan Liang. 2024. Atomthink: A slow thinking framework for multimodal mathematical reasoning. Preprint, arXiv:2411.11930." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 200, + 525, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 200, + 525, + 244 + ], + "spans": [ + { + "bbox": [ + 305, + 200, + 525, + 244 + ], + "type": "text", + "content": "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, MingSung Kan, Junxian He, and Qizhe Xie. 2023. Self-evaluation guided beam search for reasoning. In Neural Information Processing Systems." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 252, + 525, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 252, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 305, + 252, + 525, + 285 + ], + "type": "text", + "content": "Guowei Xu, Peng Jin, Hao Li, Yibing Song, Lichao Sun, and Li Yuan. 2024. Llava-cot: Let vision language models reason step-by-step. ArXiv, abs/2411.10440." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 294, + 525, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 294, + 525, + 349 + ], + "spans": [ + { + "bbox": [ + 305, + 294, + 525, + 349 + ], + "type": "text", + "content": "Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. ArXiv, abs/2303.11381." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 358, + 525, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 358, + 525, + 423 + ], + "spans": [ + { + "bbox": [ + 305, + 358, + 525, + 423 + ], + "type": "text", + "content": "Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, and Dacheng Tao. 2024. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. ArXiv, abs/2412.18319." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 305, + 432, + 525, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 432, + 525, + 477 + ], + "spans": [ + { + "bbox": [ + 305, + 432, + 525, + 477 + ], + "type": "text", + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. Preprint, arXiv:2210.03629." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 305, + 485, + 525, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 485, + 525, + 518 + ], + "spans": [ + { + "bbox": [ + 305, + 485, + 525, + 518 + ], + "type": "text", + "content": "Fei Yu, Anningzhe Gao, and Benyou Wang. 2023. Ovm, outcome-supervised value models for planning in mathematical reasoning. In NAACL-HLT." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 527, + 525, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 527, + 525, + 592 + ], + "spans": [ + { + "bbox": [ + 305, + 527, + 525, + 592 + ], + "type": "text", + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Bo Wang, Shimin Li, Yunhua Zhou, Qipeng Guo, Xuanjing Huang, and Xipeng Qiu. 2024. Scaling of search and learning: A roadmap to reproduce o1 from reinforcement learning perspective. Preprint, arXiv:2412.14135." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 602, + 525, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 602, + 525, + 645 + ], + "spans": [ + { + "bbox": [ + 305, + 602, + 525, + 645 + ], + "type": "text", + "content": "Zhuosheng Zhang, Aston Zhang, Mu Li, Hai Zhao, George Karypis, and Alexander J. Smola. 2023. Multimodal chain-of-thought reasoning in language models. Trans. Mach. Learn. Res., 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 654, + 525, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 525, + 698 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 525, + 698 + ], + "type": "text", + "content": "Wenliang Zhao, Yongming Rao, Zuyan Liu, Benlin Liu, Jie Zhou, and Jiwen Lu. 2023. Unleashing text-to-image diffusion models for visual perception. Preprint, arXiv:2303.02153." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 707, + 525, + 761 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 707, + 525, + 761 + ], + "spans": [ + { + "bbox": [ + 305, + 707, + 525, + 761 + ], + "type": "text", + "content": "Qiji Zhou, Ruochen Zhou, Nike Hu, Panzhong Lu, Siyang Gao, and Yue Zhang. 2024. Image-of-thought prompting for visual reasoning refinement in multimodal large language models. ArXiv, abs/2405.13872." 
+ } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 70, + 270, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 70, + 270, + 100 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 270, + 100 + ], + "type": "text", + "content": "A Performance Gain of VisuoThink Through Predictive Rollout Search" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 126, + 291, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 126, + 291, + 233 + ], + "spans": [ + { + "bbox": [ + 67, + 126, + 291, + 233 + ], + "type": "text", + "content": "This appendix quantifies the performance improvements achieved by integrating predictive rollout search into the VisuoThink framework across geometry and spatial reasoning tasks. The performance gain through predictive rollout search is derived by subtracting the performance of VisuoThink (w/o rollout search) from those of the VisuoThink on models." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "content": "As shown in Table 3, tasks with strong supervision (e.g., Visual Navigation and Visual Tiling) exhibit significantly higher gains compared to weak supervision tasks (e.g., Geometry " + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "inline_equation", + "content": "3K" + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "content": " and Geomverse-109). 
For instance, under strong supervision, Claude-3.5-Sonnet achieves a " + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "inline_equation", + "content": "+25.1\\%" + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "content": " improvement in Visual Navigation, while GPT-4o attains " + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "inline_equation", + "content": "+16.6\\%" + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "content": " in Visual Tiling. In contrast, weak supervision tasks like Geomverse-109 only show modest gains (e.g., " + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "inline_equation", + "content": "+5.4\\%" + }, + { + "bbox": [ + 67, + 253, + 291, + 390 + ], + "type": "text", + "content": " for GPT-4o)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 430, + 269, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 430, + 269, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 430, + 269, + 445 + ], + "type": "text", + "content": "B OKSpatial Reasoning Task Setting" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 291, + 581 + ], + "type": "text", + "content": "Our formulation extends beyond VoT's basic requirements by mandating LVLMs to generate comprehensive operational specifications - for instance, requiring explicit output of both movement directions and precise step counts at each decision node. This advancement creates more realistic and functionally grounded spatial reasoning evaluations (e.g., robotic navigation emulation in real world)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 599, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 599, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 599, + 292, + 775 + ], + "type": "text", + "content": "This appendix details the task formulation differences between VisuoThink and baseline methods (Table 4 and Table 5). For Visual Navigation, VisuoThink requires fine-grained, executable and explicit specification of both direction and step count in action sequences, whereas VoT focuses solely on direction navigation. This formulation mirrors real-world robotic navigation, where precise movement planning is critical. Similarly, in Visual Tiling, VisuoThink mandates detailed actions, including polyomino variant types, block positions, and action types (e.g., \"fit\" or \"remove\"), while VoT simplifies the task by omitting variant specifications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 303, + 70, + 525, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 70, + 525, + 97 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 525, + 97 + ], + "type": "text", + "content": "C Task Formulation of Spatial Reasoning Tasks" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 105, + 525, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 105, + 525, + 132 + ], + "spans": [ + { + "bbox": [ + 302, + 105, + 525, + 132 + ], + "type": "text", + "content": "Building upon VoT (Wu et al., 2024) framework, our challenging benchmarks comprise:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "spans": [ + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": "- Visual Navigation evaluates LVLMs in a simulated 2D grid environment, where agents must navigate from initial position " + }, + { + "bbox": [ + 
316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_0" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": " to destination " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_k" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": " through obstacle-laden paths. The formal problem is defined by grid map " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": " containing " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": " interconnected edges " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{E} = \\{\\mathbf{e}(\\mathbf{s}_0,\\mathbf{s}_1),\\mathbf{e}(\\mathbf{s}_1,\\mathbf{s}_2),\\ldots ,\\mathbf{e}(\\mathbf{s}_{k - 1},\\mathbf{s}_k)\\}" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": ". 
The LVLM should generate a sequence of executable actions in json format " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{A} = \\{(\\mathbf{d}_0,\\mathbf{l}_0),(\\mathbf{d}_1,\\mathbf{l}_1),\\ldots ,(\\mathbf{d}_{|\\mathbf{A}| - 1},\\mathbf{l}_{|\\mathbf{A}| - 1})\\}" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": ", where each tuple specifies movement direction " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{d}_i" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": " and exact step count " + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_i" + }, + { + "bbox": [ + 316, + 139, + 526, + 316 + ], + "type": "text", + "content": ", governed by the policy:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 323, + 525, + 338 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 323, + 525, + 338 + ], + "spans": [ + { + "bbox": [ + 365, + 323, + 525, + 338 + ], + "type": "interline_equation", + "content": "\\mathbf {a} _ {\\mathbf {t}} \\sim \\mathcal {P} \\left(\\mathbf {d} _ {t}, \\mathbf {l} _ {t} \\mid \\mathbf {A} _ {t - 1}, \\mathbf {M}\\right) \\tag {3}", + "image_path": "ac64c958c970ea6e08d36c9e2fd472841989265e83951721a027f0c92d45d633.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "spans": [ + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": "- Visual Tiling is a classic geometric reasoning challenge, this task assesses polyomino composition capabilities within confined rectangular regions " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ 
+ 316, + 349, + 527, + 512 + ], + "type": "text", + "content": " masked by " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": " distinct polyominoes " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{MP} = \\{\\mathbf{mp}_1,\\dots ,\\mathbf{mp}_k\\}" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": ". The LVLM must output action sequences " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_t = (\\mathbf{p}_t,\\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|B|}\\},\\mathbf{a}\\mathbf{t}_t)" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_t" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{B} = \\{\\mathbf{b}_1,\\dots ,\\mathbf{b}_{|\\mathbf{B}|}\\}" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": " respectively indicate the selected polyomino type and the coordinates of the placement blocks. 
" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{at}_t\\in \\{\\text{fit, remove}\\}" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": " indicates the action type modifying rectangular state " + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_t" + }, + { + "bbox": [ + 316, + 349, + 527, + 512 + ], + "type": "text", + "content": ", thus formalized as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 335, + 536, + 525, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 536, + 525, + 564 + ], + "spans": [ + { + "bbox": [ + 335, + 536, + 525, + 564 + ], + "type": "interline_equation", + "content": "\\mathbf {a} _ {t} \\sim \\mathcal {P} \\left(\\mathbf {p} _ {t}, \\mathbf {B}, \\mathbf {a t} _ {t} \\mid \\mathbf {R} _ {t - 1}, \\mathbf {M P}, \\mathbf {A} _ {t - 1} \\}\\right) \\tag {4}", + "image_path": "52f987e1a32a2d4014ff1a9c4711e76163fd34e624888813f4d62b0f8e10f06d.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 323, + 568, + 525, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 568, + 525, + 636 + ], + "spans": [ + { + "bbox": [ + 323, + 568, + 525, + 636 + ], + "type": "text", + "content": "Though the required actions are polyomino variant-aware as shown in table 5. As the polyomino variant type is implicitly expressed in the block positions, LVLM does not need to explicitly output it in actions anymore." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 117, + 68, + 477, + 153 + ], + "blocks": [ + { + "bbox": [ + 117, + 68, + 477, + 153 + ], + "lines": [ + { + "bbox": [ + 117, + 68, + 477, + 153 + ], + "spans": [ + { + "bbox": [ + 117, + 68, + 477, + 153 + ], + "type": "table", + "html": "
Supervision TypePerformance GainGPT-4oQwen2-VL-72BClaude-3.5-Sonnet
Strong SupervisionΔ Visual Navigation (%)+16.6+18.9+15.5
Δ Visual Tiling (%)+31.9+11.0+3.3
Δ Average (%)+24.3+15.0+9.4
Weak SupervisionΔ Geometry3K (%)+4.5+6.6+1.1
Δ Geomverse-109 (%)+6.2+4.2+6.3
Δ Average (%)+5.4+5.4+3.7
", + "image_path": "91b03a9f72e061c8c659c7721beb2d061621a2d5b30b55e226084a1e7228568a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 117, + 195, + 477, + 238 + ], + "blocks": [ + { + "bbox": [ + 67, + 160, + 525, + 185 + ], + "lines": [ + { + "bbox": [ + 67, + 160, + 525, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 160, + 525, + 185 + ], + "type": "text", + "content": "Table 3: Detailed performance gain of VisuoThink through predictive rollout search on benchmarks from Geometry and Spatial Reasoning over variable LVLM models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 117, + 195, + 477, + 238 + ], + "lines": [ + { + "bbox": [ + 117, + 195, + 477, + 238 + ], + "spans": [ + { + "bbox": [ + 117, + 195, + 477, + 238 + ], + "type": "table", + "html": "
MethodDirectionStepsTarget
Visual NavigationVoT×Navigate from the starting position
VisuoThinkto the destination.
", + "image_path": "fffc5b6abb65b39d44f783e6567ff333b981cb2198deaf81d309e62f10b77cef.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 71, + 269, + 523, + 336 + ], + "blocks": [ + { + "bbox": [ + 129, + 246, + 462, + 259 + ], + "lines": [ + { + "bbox": [ + 129, + 246, + 462, + 259 + ], + "spans": [ + { + "bbox": [ + 129, + 246, + 462, + 259 + ], + "type": "text", + "content": "Table 4: Visual Navigation task setting differences between VoT and VisuoThink." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 269, + 523, + 336 + ], + "lines": [ + { + "bbox": [ + 71, + 269, + 523, + 336 + ], + "spans": [ + { + "bbox": [ + 71, + 269, + 523, + 336 + ], + "type": "table", + "html": "
MethodActionTarget
Polyomino TypeVariant TypeBlock PositionsAction Type
Visual TilingVoTTo identify the correct variant for a polyomino in one action.
VisuoThinkTo fill the rectangle with feasible polyomino variants.
", + "image_path": "4c3f7c05a73cf0d6a4150e4bdd1954214451acc2c175ad7f630e3d5fdb677bbd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 343, + 451, + 356 + ], + "lines": [ + { + "bbox": [ + 140, + 343, + 451, + 356 + ], + "spans": [ + { + "bbox": [ + 140, + 343, + 451, + 356 + ], + "type": "text", + "content": "Table 5: Visual Tiling task setting differences between VoT and VisuoThink." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 376, + 208, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 376, + 208, + 405 + ], + "spans": [ + { + "bbox": [ + 67, + 376, + 208, + 405 + ], + "type": "text", + "content": "D Model and VisuoThink Hyperparameters" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 413, + 291, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 413, + 291, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 413, + 291, + 439 + ], + "type": "text", + "content": "We detail the model and VisuoThink Hyperparameters:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 449, + 291, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 449, + 291, + 543 + ], + "spans": [ + { + "bbox": [ + 67, + 449, + 291, + 543 + ], + "type": "text", + "content": "Model Hyperparameters To ensure experimental fairness, we uniformly constrained the number of reasoning steps (i.e., " + }, + { + "bbox": [ + 67, + 449, + 291, + 543 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 67, + 449, + 291, + 543 + ], + "type": "text", + "content": ", the depth of the reasoning tree) to 10 across all experiments. During predictive rollout search, we set the number of sampled child nodes to 3, and we discuss its impact in section 6.2." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "spans": [ + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "text", + "content": "VisuoThink Hyperparameters While VisuoThink employed a temperature of 0.8 when sampling child nodes, all other model invocations, including the baselines (e.g. CoT, VoT, VisualSketchpad, VisuoThink w/o rollout search), were conducted with temperature set to " + }, + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "inline_equation", + "content": "O" + }, + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "text", + "content": " for frontier performance. During the voting phase, we similarly maintained a temperature of " + }, + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "inline_equation", + "content": "O" + }, + { + "bbox": [ + 67, + 552, + 291, + 701 + ], + "type": "text", + "content": " and implemented single-vote sampling, which not only reduced computational overhead in terms of model calls but also achieved comparable performance." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 711, + 278, + 740 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 711, + 278, + 740 + ], + "spans": [ + { + "bbox": [ + 67, + 711, + 278, + 740 + ], + "type": "text", + "content": "E Geomverse-109 Problem Generation Trajectory" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "We establish a pipeline translating textual problems into problems with matplotlib-executable code. 
Be" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 377, + 526, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 377, + 526, + 541 + ], + "spans": [ + { + "bbox": [ + 302, + 377, + 526, + 541 + ], + "type": "text", + "content": "yond the Geometry3K (Lu et al., 2021) dataset (48 problems) utilized in Sketchpad, we incorporate the D2 subset of Geomverse (Kazemi et al., 2023) to construct an slightly bigger dataset Geomverse-109 (90 problems). The original Geomverse dataset crucially includes annotated point coordinates essential for systematic problem synthesis. During the data synthesis phase, we first randomly choose 109 problems, then LVLMs generate corresponding high-quality Python code through LLM self-reflection (Shinn et al., 2023), then we filter out problems with poor diagram quality." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_content_list.json b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..22267e92bfecd4df66ff09ed909d0b525f0bb2ef --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_content_list.json @@ -0,0 +1,3325 @@ +[ + { + "type": "text", + "text": "Secure Physical Layer Communications for Low-Altitude Economy Networking: A Survey", + "text_level": 1, + "bbox": [ + 119, + 70, + 875, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lingyi Cai, Jiacheng Wang, Ruichen Zhang, Yu Zhang, Tao Jiang, Fellow, IEEE, Dusit Niyato, Fellow, IEEE, Xianbin Wang, Fellow, IEEE, Abbas Jamalipour, Fellow, IEEE, and Xuemin Shen, Fellow, IEEE", + "bbox": [ + 89, + 147, + 906, + 183 + ], + "page_idx": 0 + }, + { + "type": "text", 
+ "text": "Abstract—The Low-Altitude Economy Networking (LAENet) is emerging as a transformative paradigm that enables an integrated and sophisticated communication infrastructure to support aerial vehicles in carrying out a wide range of economic activities within low-altitude airspace. However, the physical layer communications in the LAENet face growing security threats due to inherent characteristics of aerial communication environments, such as signal broadcast nature and channel openness. These challenges highlight the urgent need for safeguarding communication confidentiality, availability, and integrity. In view of the above, this survey comprehensively reviews existing secure countermeasures for physical layer communication in the LAENet. We explore core methods focusing on anti-eavesdropping and authentication for ensuring communication confidentiality. Subsequently, availability-enhancing techniques are thoroughly discussed for anti-jamming and spoofing defense. Then, we review approaches for safeguarding integrity through anomaly detection and injection protection. Furthermore, we discuss future research directions, emphasizing energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. This survey may provide valuable references and new insights for researchers in the field of secure physical layer communication for the LAENet.", + "bbox": [ + 73, + 234, + 491, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Low-altitude economy networking, secure physical layer communications, communication confidentiality, communication availability, communication integrity.", + "bbox": [ + 73, + 556, + 491, + 598 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 614, + 351, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "WITH the rapid development of aerial vehicle technologies and communication networks, the concept of Low-Altitude Economic Networking (LAENet) has emerged to enable more comprehensive, large-scale, and intelligent", + "bbox": [ + 73, + 633, + 491, + 695 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lingyi Cai is with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China, and also with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: lingyicai@hust.edu.cn).", + "bbox": [ + 73, + 704, + 491, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiacheng Wang, Ruichen Zhang, and Dusit Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mails: jiacheng.wang@ntu.edu.sg; ruichen.zhang@ntu.edu.sg; dniyato@ntu.edu.sg).", + "bbox": [ + 73, + 761, + 491, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yu Zhang and Tao Jiang are with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China (e-mail: yuzhang123@hust.edu.cn; tao.jiang@ieee.org).", + "bbox": [ + 73, + 806, + 491, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xianbin Wang is with the Department of Electrical and Computer Engineering, Western University, London, ON, N6A 5B9, Canada (e-mail: xianbin.wang@uwo.ca).", + "bbox": [ + 73, + 852, + 491, + 887 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abbas Jamalipour is with the School of Electrical and Computer Engineering, University of Sydney, Australia (e-mail: a.jamalipour@ieee.org).", + "bbox": [ + 73, + 886, + 491, + 909 + ], + "page_idx": 0 + }, + { + "type": 
"text", + "text": "Xuemin Shen is with the Department of Electrical and Computer Engineering, University of Waterloo, Waterloo, ON N2L 3G1, Canada (e-mail: sshen@uwaterloo.ca).", + "bbox": [ + 73, + 909, + 491, + 944 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "connectivity to support various low-altitude activities [1]–[4], such as intelligent transportation, logistics delivery, communication enhancement, disaster monitoring, and emergency response [5]–[8], as shown in Fig. 1. The LAENet is built upon earlier frameworks of single Unmanned Aerial Vehicle (UAV) operation and multi-UAV networks. A single UAV typically maintains a direct link to a ground station or base station, operating with simple control procedures and delivering cost-effective services but with limited range and scalability [9]. The UAV network focuses on formation control and multi-UAV collaboration, enabling broader mission areas and stronger fault tolerance [9]–[11]. Advancing from these foundations, the LAENet integrates various aerial vehicles into a high-density communication network, connecting them not only to ground stations but also to other platforms such as base stations, access points, and even satellites [12], [13]. Thus, the LAENet can enable ubiquitous coverage, high reliability, robust fault tolerance, greater autonomy, and intelligence.", + "bbox": [ + 501, + 234, + 921, + 506 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Specifically, the LAENet refers to an integrated network system that connects various low-altitude flight operations, including general aviation, drones, electric vertical take-off and landing (eVTOL) aircraft, and other aerial platforms, within the designated low-altitude airspace (typically below 1,000 meters, and in some cases extending up to 3,000 meters) [1], [13]. 
The LAENet serves as a vital bridge between ground-based economies and airspace resources, which will drive technological innovation and unlock substantial social and economic benefits [14], [15]. The Civil Aviation Administration of China estimates that the country's low-altitude market will soar from 500 billion Chinese yuan (about 70 billion US dollars) in 2023 to 1.5 trillion Chinese yuan (about 200 billion US dollars) in 2025 and as much as 3.5 trillion Chinese yuan (about 480 billion US dollars) in 2035 [16]. Currently, research institutions and enterprises across multiple regions in China are continuously advancing and expanding innovative research and commercial applications of UAVs and eVTOLs in low-altitude activities [17]. Meanwhile, in the United States, the Federal Aviation Administration has confirmed its commitment to actively promoting the development of electric air taxis and integrating this type of aircraft into the national airspace [18].", + "bbox": [ + 503, + 507, + 921, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the LAENet, physical layer communication serves as a critical foundation for wireless communication between aerial vehicles and between aerial vehicles and communication infrastructure [10], [28], [31]. The physical layer converts digital data from higher protocol layers into signals suitable for transmission over aerial communication channels [32]–[34].", + "bbox": [ + 501, + 854, + 923, + 946 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09153v1 [cs.CR] 12 Apr 2025", + "bbox": [ + 22, + 276, + 58, + 720 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg", + "image_caption": [ + "Fig. 1. 
The overall architecture of the LAENet covers the main application scenarios, including emergency monitoring and response, temporary communication relay, communication coverage expansion, low-altitude smart logistics, and urban air mobility. The table compares the similarities and differences between the LAENet, single UAV, and UAV networks, representing the evolution of the LAENet." + ], + "image_footnote": [], + "bbox": [ + 81, + 66, + 919, + 406 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This process encompasses encoding data into bit sequences, modulating them onto carrier waves, and ensuring reliable signal propagation through the wireless medium [32], [35], [36]. At the receiver side, the physical layer performs inverse operations, including demodulating the incoming signals, decoding the bit sequences, and passing the data to upper layers for further processing [37]–[39]. Therefore, the physical layer supports the core communication mechanisms in the LAENet and plays a crucial role in its aerial deployment. For example, aerial vehicles deployed as aerial base stations (ABSs) or aerial relays can overcome interference, signal distortion, and environmental variations inherent in communication links by using physical layer functionalities such as channel access, multiplexing, and channel equalization [33], [40], [41].", + "bbox": [ + 73, + 479, + 491, + 691 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, physical layer communication in the LAENet is exposed to a variety of security threats due to the inherent characteristics of aerial communication environments [42]. The broadcast nature of wireless signals and the prevalence of line-of-sight (LoS) propagation make aerial links particularly vulnerable to eavesdropping, jamming, and spoofing attacks [1], [43]. These attacks can compromise communication confidentiality, disrupt communication, or deceive aerial vehicles by impersonating legitimate transmitters [44], [45]. 
Furthermore, the openness of wireless channels and weak authentication mechanisms increase the risk of unauthorized access and injection attacks, allowing adversaries to infiltrate the network or inject malicious signals [46], [47]. Additionally, the open medium and dynamic spectrum access may cause anomalous behaviors to disrupt normal communication operations in the LAENet [48], [49].", + "bbox": [ + 73, + 702, + 491, + 944 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Confronted with these substantial security challenges, this paper conducts a comprehensive analysis on physical layer communications of the LAENet and provides a thorough survey of technologies and solutions to address communication confidentiality, availability, and integrity. Table II gives a clear structure for showing existing efforts on secure physical layer communications for the LAENet.", + "bbox": [ + 501, + 479, + 921, + 584 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Related Surveys", + "text_level": 1, + "bbox": [ + 504, + 607, + 638, + 623 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, a number of excellent survey and tutorial papers have overviewed security issues in UAV networks and communications and have summarized corresponding countermeasures and solutions, as shown in Table I. Some works consider security issues at the system level including intrusion, privacy, and trust issues. The work in [19] provides a comprehensive review of security threats facing UAVs and UAV networks, including communication vulnerabilities, sensor spoofing, jamming, and malware attacks. It examines various countermeasures such as encryption, global positioning system (GPS) spoofing mitigation, and firmware signing. A gap analysis is performed to identify remaining security vulnerabilities and provide recommendations for future UAV development. 
The study in [20] conducts a comprehensive review of security issues in UAV swarm networks, examining various potential attacks such as communication attacks, identity-based attacks, resource attacks, routing attacks, data attacks, and machine learning (ML) attacks. It categorizes these threats and presents corresponding security technologies and countermeasures, including cryptography, physical layer security techniques, blockchain, machine learning, and intrusion detection", + "bbox": [ + 501, + 627, + 923, + 946 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/34be38f581f31f563210090b46c7023435f7f21295c66f10111880e33a75aa87.jpg", + "table_caption": [ + "TABLEI SUMMARY OF RELATED SURVEYS" + ], + "table_footnote": [], + "table_body": "
ReferencesFocus
[19]A review of cybersecurity threats, countermeasures, and research gaps in UAV networks, with a focus on emerging attack surfaces and commercial UAV applications
[20]A survey of security threats, vulnerabilities, and countermeasures in UAV swarm networks, with a focus on classifying attack types and reviewing emerging defense technologies
[21]A review of security threats, vulnerabilities, and countermeasures in UAVs and Flying Ad Hoc Networks with attack surface analysis with simulation-based evaluation
[22]A survey of vulnerabilities across software, hardware, and communication layers in UAV systems, and an exploration of emerging defense technologies
[23]A survey of security challenges in drone communication and a review of emerging technologies used to enhance the speed, reliability, and security of UAV networks
[24]A review of UAV security challenges, existing controls, and future research directions, with an emphasis on the transformative role of AI in enabling secure UAV systems
[25]A review of security threats classified from a cyberspace security perspective and countermeasures in UAV systems
[26]A survey of security threats, requirements, and counter-measures in UAV-aided Internet of Things (IoT) applications
[27]A survey of cybersecurity vulnerabilities and countermeasures in UAV systems, integrating threat classification, communication protocols, and emerging techniques
[28]A survey of PLS in UAV communications, focusing on key challenges, methodologies, and recent advancements for both static and mobile UAV deployment scenarios
[29]A review of security challenges, practical deployment aspects, and standardization progress associated with integrating UAVs into cellular networks
[30]A survey of layer-wise cybersecurity threats and AI-enabled countermeasures in UAV-assisted IoT applications
", + "bbox": [ + 80, + 103, + 486, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "systems. The authors in [21] provide a detailed examination of security challenges in UAVs and FANETs, covering various attack vectors including communication, identity-based, resource, routing, data, and machine learning attacks. The study in [22] examines security and privacy vulnerabilities in UAV systems across hardware, software, and communication layers. It discusses various threats such as eavesdropping and jamming attacks, and presents defense mechanisms including blockchain, machine learning-based intrusion detection, and secure communication protocols.", + "bbox": [ + 73, + 564, + 491, + 715 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Some studies emphasize cyber security challenges within UAV networks. The study in [23] comprehensively reviews security issues in drone communication, including Denial of Service (DoS), GPS spoofing, and man-in-the-middle attacks. It examines vulnerabilities across different drone applications and presents countermeasures using blockchain, software-defined networks, machine learning, and fog computing. The authors of [24] provide a comprehensive survey of security challenges in UAV systems, including various types of attacks, privacy concerns, and trust issues. It identifies current research trends and gaps while establishing a future roadmap with a focus on artificial intelligence (AI)'s potential to enhance UAV security. 
The authors in [25] provide a comprehensive review of security issues in UAV networks, examining various potential attacks such as spoofing, replay, jamming, and", + "bbox": [ + 73, + 718, + 491, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TABLE II", + "text_level": 1, + "bbox": [ + 684, + 71, + 741, + 82 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CHALLENGES AND SOLUTIONS", + "text_level": 1, + "bbox": [ + 625, + 84, + 800, + 93 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "RED CIRCLES DESCRIBE THE SECURITY ISSUES; GREEN CIRCLES REPRESENT THE OVERALL COUNTERMEASURES FOR THE SECURITY ISSUES; GREEN CHECK MARKERS INDICATE DIFFERENT TYPES OF SOLUTIONS UNDER EACH COUNTERMEASURE", + "bbox": [ + 526, + 95, + 901, + 138 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/59767b138893846c4828b5215fcedab577ef94ef050884296e36a852a110260c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Section III, Challenge 1: Communication confidentiality
Issues● Eavesdropping attack [46], [50]● Unauthorized access [31], [51], [52]
Solutions● Anti-eavesdropping strategies√ Convex optimization-based strategies [53]–[59]√ Reinforcement learning-based strategies [60]–[65]√ Deep learning-based strategies [66]–[71]● Communication authentication√ PUFs-based authentication [72]–[74]√ Channel based-authentication [75]–[77]
Section IV, Challenge 2: Communication availability
Issues● Jamming attack [48], [78], [79]● Spoofing attack [49], [50], [52], [78]
Solutions● Anti-jamming strategies√ Convex optimization [80]–[82]√ Single-agent RL [83]–[86]√ Multi-agent RL [87]–[89]● Spoofing defense√ PLA [77], [90], [91]√ GNSS spoofing detection [92]–[94]
Section V, Challenge 3: Communication Integrity
Issues● Anomalous behaviors [61], [95], [96]● Injection attacks [28], [46], [97]
Solutions● Anomaly detection√ Jamming anomaly detection [98]–[101]√ Abnormal power detection [102]√ Eavesdropping anomaly detection [103]● Injection defense√ Jamming signal injection defense [98], [101], [104]● Spoofing signal injection defense [105]–[107]
", + "bbox": [ + 506, + 148, + 919, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "eavesdropping attacks. It categorizes these threats and presents corresponding security technologies and countermeasures. The study in [26] provides a comprehensive review of security issues in UAV-aided IoT applications and presents corresponding security technologies and countermeasures. The work in [27] reviews cybersecurity threats affecting UAV systems and evaluates existing countermeasures in enhancing UAV security.", + "bbox": [ + 501, + 544, + 921, + 650 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In addition, some surveys analyze the challenges faced by UAV systems from a layered perspective (e.g., physical layer, link layer, network layer, application layer). The work in [28] deeply reviews the current state of physical layer security (PLS) in UAV communications, examining unique air-to-ground channel characteristics, static and mobile UAV deployment scenarios, and various security enhancement techniques. The work in [29] presents a comprehensive overview of UAV cellular communications, covering the classification of consumer drones, the concept and potential of UAV-mounted flying base stations. It explores the integration of UAVs into cellular networks as novel user equipment and addresses key challenges related to interference, regulatory compliance, and security. The authors of [30] review the cybersecurity landscape of UAV-assisted IoT applications, examining layer-wise security threats from physical to application layers. 
It explores how AI, ML, deep learning (DL), and reinforcement learning (RL) techniques have been employed to address authentication, data privacy, and attack prevention challenges.", + "bbox": [ + 501, + 657, + 921, + 945 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/d546a765ef9d9111da613533a636f1121d9e0b344a8fa8ca2f8b74b4bc8c977f.jpg", + "table_caption": [ + "TABLE III", + "LIST OF ABBREVIATIONS" + ], + "table_footnote": [], + "table_body": "
AbbreviationDescriptionAbbreviationDescription
A2GAir-to-groundABSAerial Base Station
ANArtificial NoiseAIArtificial Intelligence
BCDBlock Coordinate DescentBSBase Station
CNNConvolutional Neural NetworkCSIChannel State Information
DDPGDeep Deterministic Policy GradientDDQNDouble-deep Q-Learning
DLDeep LearningDNNDeep Neural Network
DQNDeep Q-NetworkeVTOLElectric Vertical Take-off and Landing
DRLDeep Reinforcement LearningFARFalse Alarm Rate
G2AGround-to-airG2UGround-to-UAV
GANGenerative Adversarial NetworkGNSSGlobal Navigation Satellite System
GPSGlobal Positioning SystemGSGround Station
IoTInternet of ThingsLAENetLow-Altitude Economy Networking
LSTMLong Short-Term MemoryLoSLine-of-sight
MARLMulti-agent Reinforcement LearningMDPMarkov Decision Process
MDRMiss Detection RateMECMobile Edge Computing
MLMachine LearningMSEMean Square Error
NOMANon-orthogonal Multiple AccessPLAPhysical-layer Authentication
PLSPhysical Layer SecurityPUFPhysical Unclonable Function
QoEQuality of ExperienceRFRadio Frequency
RISReconfigurable Intelligent SurfacesRLReinforcement Learning
RNNRecurrent Neural NetworkRSSReceived Signal Strength
SCASuccessive Convex ApproximationSDNRSignal-to-disturbance-plus-noise Ratio
SNRSignal-to-noise RatioSOCSecond-Order Cone
TDMATime-division Multiple AccessTHzTerahertz
U2GUAV-to-ground CommunicationUAVUnmanned Aerial Vehicle
", + "bbox": [ + 181, + 103, + 816, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "B. Contributions of Our Survey", + "text_level": 1, + "bbox": [ + 75, + 443, + 294, + 458 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The related surveys and tutorials primarily focus on the classification of overall security threats and corresponding countermeasures in UAV networks or UAV-assisted applications, with relatively little attention given to security issues of communication in the physical layer. Different from existing studies, our survey uniquely concentrates on the security challenges specific to physical layer communications in the LAENet, as summarized in Table II. It fills a critical gap in the literature by conducting an in-depth analysis of threats in physical layer communications that were previously underexplored or only briefly mentioned in prior studies. By offering a comprehensive and systematic analysis of these underexplored issues, our work brings new insights to seek effective solutions to enhance physical layer security in communications of the LAENet. The key contributions of this paper are summarized as follows:", + "bbox": [ + 73, + 468, + 491, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The key contributions of this paper are summarized as follows:", + "bbox": [ + 73, + 710, + 490, + 741 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A thorough discussion of the six main security issues in the physical layer communication of the LAENet is presented, namely, eavesdropping attack, unauthorized access, jamming attack, spoofing attack, anomalous behaviors, and injection attack. 
We analyze these attacks in the context of their potential occurrence throughout the entire operation of LAENet, providing essential references for ensuring the security of physical layer communication in the future LAENet deployments.", + "- We review countermeasures against various attacks in detail and offer a comprehensive tutorial on achieving communication confidentiality, communication availability, and communication integrity in LAENet. In addition," + ], + "bbox": [ + 89, + 748, + 491, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the lessons learned for each security issue are presented to emphasize the limitations of existing works and provide high-level insights for improvements.", + "bbox": [ + 535, + 443, + 921, + 488 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Several potential future research directions for secure physical layer communication in LAENet are proposed, including energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. These diverse perspectives offer new guidance for future research on secure physical layer communication in LAENet.", + "bbox": [ + 521, + 489, + 921, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The remainder of this paper is given as follows. Section II introduces the background of the LAENet and security issues in physical layer communication of the LAENet. In Section III, a comprehensive exploration of achieving communication confidentiality for the LAENet is presented. Section IV reviews the solutions for communication availability in the LAENet. In Section V, countermeasures on communication integrity for the LAENet are discussed. Section VI provides future research directions, and Section VII concludes this paper. 
Additionally, Table III lists the abbreviations commonly employed throughout this survey.", + "bbox": [ + 503, + 625, + 921, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "II. BACKGROUND KNOWLEDGE", + "text_level": 1, + "bbox": [ + 598, + 803, + 826, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we introduce the background of the LAENet, including its definition and application scenarios. Subsequently, the concept of physical layer communication in the LAENet and its security threats are elaborated in detail.", + "bbox": [ + 503, + 821, + 921, + 882 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Background of LAENet", + "text_level": 1, + "bbox": [ + 504, + 896, + 687, + 910 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The LAENet is a sophisticated and dynamic system that integrates various aerial and terrestrial technologies to en", + "bbox": [ + 503, + 914, + 921, + 945 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 31, + 919, + 39 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg", + "image_caption": [ + "Fig. 2. Background knowledge of the LAENet and security issues in its physical layer communication. Describe the definition of the LAENet and its communication application scenarios. Elaborate on three key metrics for secure physical layer communication: communication confidentiality, which combats eavesdropping attacks and unauthorized access; anti-jamming strategies and spoofing defense for ensuring communication availability; and anomaly detection and injection defense to prevent adversaries from compromising communication integrity." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 70, + 916, + 428 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "able seamless communication, coordination, and management of diverse aerial operations within low-altitude airspace [1], [108]. The LAENet includes numerous different types of constituents, such as flight equipment, base stations, and other communication platforms. Specifically, the LAENet connects various aerial vehicles, including general aviation aircraft for passenger transport and emergency rescue, drones for surveillance and logistics, and eVTOL designed for urban air mobility and last-mile cargo delivery [109], [110]. These aerial vehicles can incorporate ground and aerial base stations, further high-altitude platforms, such as weather balloons and satellites, to receive environmental information and precise navigation [13].", + "bbox": [ + 73, + 515, + 491, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Different from traditional aviation networks that rely on centralized air traffic control, the LAENet can independently construct communication and networking by seamlessly interconnecting a variety of aerial and ground-based systems, which enables continuous information exchange, flight path optimization, and autonomous operations [8], [111]. Therefore, the LAENet has opened opportunities for various application scenarios and plays key roles from the perspective of communication coverage and relay [112]–[114]. Specifically, the LAENet can extend the communication coverage by deploying aircraft as ABSs in areas lacking communication infrastructure [115]–[117]. For instance, these ABSs deployed at optimal altitudes can provide connectivity and network services in remote or disaster-stricken areas [118], [119]. 
Moreover, if the direct communication links between ground base stations and user equipment are unreliable, such as in", + "bbox": [ + 73, + 703, + 491, + 945 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "mountainous regions and densely populated areas, the aircraft can act as mobile relays to improve connectivity by capturing, amplifying, and transmitting communication signals [120]–[122]. It also can be regarded as a surveillance unit to monitor airspace dynamics while simultaneously functioning as a low-altitude network orchestrator to optimize communication and computing resources [118], [123], [124].", + "bbox": [ + 501, + 515, + 919, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To integrate and evolve these capabilities, the LAENet needs to establish effective communication infrastructure to ensure reliable connectivity and efficient interaction across various environments [31], [125]. Physical layer communication, as the bottom layer in the network architecture, may directly influence the communication performance of the LAENet across aerial and terrestrial networks [43], [46]. For example, it governs how signals are generated, transmitted, and received between aircraft and base stations [31]. Building on this, it manages the channel and spectrum resources to enhance signal transmission quality and maintain stable connectivity [43]. Therefore, ensuring the security of physical layer communication in the LAENet is crucial for supporting a wide range of applications in low-altitude domains.", + "bbox": [ + 501, + 622, + 921, + 834 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "B. 
Security Issues in Physical Layer Communication of LAENet", + "text_level": 1, + "bbox": [ + 503, + 862, + 923, + 890 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Based on previous studies [126], [127], we discuss the security issues in the physical layer communication of the LAENet from three aspects: confidentiality, availability, and", + "bbox": [ + 503, + 898, + 921, + 945 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "integrity of communications. The details of each measurement are described as follows.", + "bbox": [ + 78, + 69, + 488, + 97 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The confidentiality of physical layer communications in the LAENet can be compromised by security threats such as eavesdropping and unauthorized access [128]. Eavesdropping arises primarily from the broadcast nature of wireless signals and LoS link, making transmissions highly susceptible to interception [46]. An eavesdropper silently capturing or intercepting signals can lead to the exposure of confidential information. Meanwhile, unauthorized access threats exploit the open and broadcast nature of UAV communications [31]. Attackers may gain illegal access to the LAENet by disguising themselves as legitimate UAVs or ground stations, thereby deceiving or interfering with the normal operation of UAVs [51].", + "- Similarly, the open nature of wireless channels and LoS propagation bring jamming and spoofing security issues for communication availability [78]. Specifically, jammers can continuously transmit interference signals to disrupt communication, where a jammer can be a drone or a base station [48]. The spoofing attack can not only achieve identity spoofing by forging legitimate transmission identities but also launch signal deception attacks to disrupt UAV communications and positioning [49]. 
Therefore, jamming and spoofing lead to unauthorized access and signal disruptions or errors, making communication unavailable in the LAENet.", + "- Integrity as a microscopic metric measures the deviations of signals, channels, and spectrum in communication under adversaries' influence [129]. The communication integrity of the LAENet can be affected by anomalous behaviors and injection attacks. Anomalous behaviors often use dynamic spectrum access and the open wireless medium, including abnormal jamming, abnormal transmission power, and covert eavesdropping [95]. These anomalous behaviors can introduce harmful interference, violate spectrum policies, and expose sensitive information to eavesdroppers [61], [96]. Moreover, the injection attack exploits the open nature of wireless channels to alter signals or inject illegal signals, such as spoofing signals or malicious GNSS signals, to deceive receivers and interfere with communication, thereby leading to degraded signal quality, false navigation, and network congestion [28], [46], [97]." + ], + "bbox": [ + 94, + 128, + 488, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, as illustrated in Fig. 2, this survey reviews existing research on achieving communication confidentiality, availability, and integrity for the LAENet. Specifically, the investigation of anti-jamming strategies and communication authentication schemes aims to enhance communication confidentiality. Studies on anti-jamming techniques and spoofing defense mechanisms have been explored to ensure communication availability. Furthermore, research on communication integrity has focused on anomaly detection and injection attack mitigation approaches.", + "bbox": [ + 78, + 792, + 488, + 943 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "III. COMMUNICATION CONFIDENTIALITY FOR LAENET", + "text_level": 1, + "bbox": [ + 517, + 69, + 908, + 82 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. 
Anti-eavesdropping Strategy", + "text_level": 1, + "bbox": [ + 506, + 92, + 718, + 106 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The LAENet faces significant eavesdropping threats due to the inherent vulnerabilities of UAV-enabled wireless communications. The openness of wireless channels, especially the LoS links in air-to-ground (A2G) and ground-to-air (G2A) communications, increases susceptibility to interception by eavesdroppers that disrupt legitimate communications compared to traditional terrestrial channels [50]. Traditional cryptographic methods, while effective in many scenarios, are less suitable for UAV communications due to their computational complexity and the dynamic mobility of UAVs [130]. This highlights the critical need for robust security measures to ensure the confidentiality and reliability of the LAENet communications. To address these limitations, leveraging PLS techniques to counter eavesdropping threats effectively has emerged as a promising solution [131]-[134].", + "bbox": [ + 506, + 112, + 919, + 338 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the LAENet, anti-eavesdropping solutions can leverage the controllable mobility of low-altitude aircraft to enhance physical layer security. By dynamically optimizing their trajectories, low-altitude aircraft can actively adapt their flight paths to shape the communication environment [135]. This approach allows them to fly closer to legitimate ground nodes, strengthening communication links and improving channel conditions for intended receivers, while simultaneously distancing themselves from potential eavesdroppers. In this subsection, we present a critical role of UAV trajectory in forming the communication environment, and how PLS can be enhanced through trajectory optimization and resource allocation to mitigate eavesdropping risks. 
Our analysis focuses on three prominent methodologies in this domain: convex optimization, deep learning, and reinforcement learning.", + "bbox": [ + 506, + 339, + 919, + 565 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Convex optimization plays a crucial role in addressing anti-eavesdropping challenges in UAV-enabled communication networks, particularly for solving the joint optimization of trajectory and resource allocation [137]. Due to the inherent non-convex nature of these problems, advanced convex optimization techniques such as Successive Convex Approximation (SCA) and Block Coordinate Descent (BCD) are widely utilized [135]. These methods enable UAVs to enhance physical layer security by optimizing flight paths and resource utilization, minimizing the risk of eavesdropping while ensuring secure and efficient communication. Additionally, the decision variables may be discrete, which requires the application of various relaxation methods to transform the complex optimization problem into a more tractable form to obtain efficient solutions [138].", + "bbox": [ + 506, + 566, + 919, + 792 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The study in [53] explores physical-layer security in UAV-assisted Mobile Edge Computing (MEC) systems in the presence of multiple ground-based eavesdroppers. The proposed system utilizes dual UAVs for task execution and anti-eavesdropping measures. One UAV operates as a mobile MEC server, while the other emits jamming signals to disrupt eavesdroppers, as shown in Fig. 3. 
The time-division multiple access (TDMA) scheme and non-orthogonal multiple access (NOMA) scheme are proposed to maximize the minimum secure computing capacity by jointly optimizing communica", + "bbox": [ + 506, + 792, + 919, + 943 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 31, + 919, + 39 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c4e8fccf864b7793ee0ff85b396ae3929eca2320ee801b9671f6ac1ac3fdaf87.jpg", + "table_caption": [ + "TABLE IV SUMMARY OF CONVEX OPTIMIZATION FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [ + "1Secure calculation capacity is defined as the average number of secure calculation bits in UAV flying time [54].", + "2Secrecy rate is defined as the difference between the achievable rate of legitimate UAV's channel and the rate of eavesdropper channel [136]." + ], + "table_body": "
RefOptimization ObjectivesEavesdropper and Jammer TypeOptimizationConstraintsPros & Cons
[53]Secure calculation capacity1UAV jammer and fixed ground eavesdropperTransmit power, time allocation, and computation capacityBCD and P-BCD for secure calculation capacity maximization\n✓ Secure capacity of NOMA and TDMA has been significantly improved\nX High complexity for NOMA due to dual-loop iterations
[54]Secure calculation capacityBase station jammer and fixed ground eavesdropperTransmission power, time allocation, and CPU processing frequencyJDPB algorithm with SCA and BCD for secure task offloading\n✓ Reduce complexity via region division\nX Fixed UAV altitude limits 3D trajectory optimization
[55]Average secrecy rate2Antenna jammer and fixed aerial eavesdropperTransmit power and jamming powerBCD and SCA optimization with hybrid FSO/RF links\n✓ Enhance communication security via hybrid FSO/RF links and AN\nX Rely on simplified channel models (e.g., free-space path loss)
[56]Worst-case secrecy rateUAV jammer and fixed ground eavesdropperUAV speed, collision avoidance, positioning error, and energy harvestingRobust 3D trajectory and time switching optimization\n✓ Full mobility of UAVs in 3D for improving secrecy rate\nX The performance may degrade with flying eavesdroppers
[57]Average secrecy rateNone and flying eavesdropperTransmit power control and user schedulingJoint trajectory and communication design against mobile eavesdroppers\n✓ Initial trajectory design for keeping away from eavesdroppers\nX Security performance relies on the initial trajectory design
[58]Secure calculation capacityGround jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceIntegrate a dual-UAV system with a ground jammer in MEC\n✓ Incorporate the UAV server and UAV eavesdropper with a ground jammer\n✓ Allow a UAV server to hover near ground users for secure offloading\nX Numerous flight constraints may require extensive tuning
[59]Secrecy rateCoastal jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceA secure communication for UAV-relay-assisted maritime MEC\n✓ Simultaneously optimize multiple parameters for improved secrecy rate\nX Iterative decomposition increases the computational burden\nX Assume prior knowledge of Channel State Information (CSI) of devices
", + "bbox": [ + 76, + 116, + 919, + 450 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "tion resources, computation resources, and UAV trajectories. To address the non-convexity of the optimization problem, the problem is transformed into tractable forms via auxiliary variables and decomposition. Specifically, for the TDMA scheme, the problem is decoupled into two sub-problems using BCD. The communication and computation resources are optimized via second-order cone (SOC) constraints and SCA, while UAV trajectories are iteratively updated via first-order Taylor approximations to handle non-convex terms. For the NOMA scheme, a penalized BCD (P-BCD) algorithm is proposed to tackle binary constraints. The problem is split into three blocks that are penalty parameter adjustment, resource allocation via SOC and SCA, and trajectory optimization with convex relaxations. The experimental results demonstrate that the proposed algorithms significantly enhance secure computing capacity, with the NOMA scheme achieving up to about 4.3 Mbps and the TDMA scheme reaching about 4.2 Mbps under optimal conditions. Compared to baselines including the straight flight design and no power control, the proposed strategies improve secure computing capacity by about $20\\%$ to $30\\%$ , particularly in scenarios with lower power budgets (e.g., 0.2 W) and higher required computing bits (e.g., 1 Mbps). The convergence of the algorithms is achieved within 20 iterations, which indicates the efficiency in optimizing UAV trajectories and resource allocation for anti-eavesdropping.", + "bbox": [ + 73, + 500, + 491, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The study in [53] mainly focuses on a dual-UAV-assisted secure MEC system. In some cases, multi-UAV systems hold great promise for collaboratively executing complex tasks while enhancing the secure communications [49], [54]. 
In the", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "work [54], the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems is studied. Firstly, a base station emits jamming signals to protect against fixed-location ground eavesdroppers. Then, it investigated the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems. The problem is decomposed into two sub-problems: (1) resource allocation and trajectory planning, addressed via SCA and BCD algorithms; (2) offloading decisions, solved through Joint Dynamic Programming and Bidding (JDPB) method. For the first sub-problem, non-convex constraints related to transmission power and UAV trajectory are transformed into convex forms using first-order Taylor expansion and relaxation techniques. Specifically, the transmission power optimization sub-problem is approximated via SCA, while the trajectory planning sub-problem is iteratively solved by introducing auxiliary variables and convex approximations. For the second sub-problem, a bidding mechanism is integrated with dynamic programming to reduce computational complexity by grouping dynamic users into sub-regions. The experimental results demonstrate that the proposed JDPB algorithm achieves a sum average secure calculation capacity of 10.1 Mbps in the first time slot. 
Additionally, under different settings of time slot sizes, transmission power, and flying speed, the sum average secure calculation capacity achieved by JDPB consistently outperforms baseline schemes such as the Greedy Strategy and the Random Strategy.", + "bbox": [ + 501, + 500, + 921, + 922 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Unlike the above studies that deal with ground eavesdrop-", + "bbox": [ + 519, + 928, + 921, + 945 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 80, + 75, + 482, + 253 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg", + "image_caption": [ + "Fig. 3. The overall architecture of the anti-eavesdropping strategy. Part A illustrates the system model against fixed ground eavesdroppers. In this setup, one UAV operates as a mobile server, while another UAV serves as a jammer to emit jamming signals to disrupt the eavesdroppers' interception capabilities. Part B presents the system model for flying eavesdroppers, where one UAV acts as the server, and another UAV functions as a mobile eavesdropper. To mitigate eavesdropping risks, a ground-based jammer actively emits interference signals to secure communications." + ], + "image_footnote": [], + "bbox": [ + 80, + 276, + 472, + 448 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pers, the work in [55] targets threats from aerial eavesdroppers and explores secure communication in a hybrid Free Space Optical (FSO) and Radio Frequency (RF) system. The UAV acts as both a relay and a jammer, emitting artificial noise (AN) during RF transmission to confuse a fixed-position aerial eavesdropper. 
The work introduces a novel perspective on protecting space-air-ground networks from eavesdropping by leveraging FSO for its inherent resistance to interception and jointly optimizing trajectory design and power allocation to maximize the secrecy rate with two transmission schemes. The first scheme is the slot-based scheme for delay-sensitive data. The trajectory sub-problem is convexified using first-order Taylor expansion to approximate elevation angle and channel gain constraints, while the power allocation sub-problem is transformed into a convex form by introducing a lower bound on transmit power to ensure convexity. The second scheme is the period-based scheme for delay-insensitive data, in which the relaxed constraints on sum secrecy rates over the entire flight period are adopted. A similar SCA method [54] is applied to convexly approximate the non-convex terms in the constraints. Compared to benchmark schemes without jamming power optimization, both methods achieve approximately 0.4 Mbps higher secrecy rates by integrating AN transmission", + "bbox": [ + 73, + 597, + 495, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and hybrid FSO/RF links.", + "bbox": [ + 504, + 69, + 684, + 82 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "It is worth noting that most existing studies consider optimizing UAV trajectories on a 2D plane. However, optimizing UAV 3D trajectories may be more practical [139]. The study in [56] considers the UAV's 3D flight trajectory and imperfect knowledge of eavesdroppers' locations, while formulating an optimization approach to maximize the worst-case secrecy rate under various practical constraints, including maximum UAV speed, UAV collision avoidance, UAV positioning error, and UAV energy harvesting. To address the non-convexity of the optimization problem, the original problem is decomposed into multiple sub-problems using BCD and SCA techniques similar to studies in [54] and [55]. 
By incorporating the additional degree of freedom in the vertical dimension, the proposed approach improves the ability to avoid fixed eavesdropping zones, outperforming 2D trajectory models in maintaining secure communication links under dynamic conditions. Simulation results show that the average secrecy rate of the proposed 3D optimization scheme outperforms that of the fixed-height 2D benchmarks (set at $100\\mathrm{m}$ ) by over $20\\%$ .", + "bbox": [ + 501, + 84, + 921, + 371 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Unlike the above studies that focus on fixed ground eavesdroppers, mobile eavesdroppers, such as hostile UAVs, introduce more complex threats due to their ability to maneuver, track, and position for intercept communications [22], [57]. For example, the authors in [57] address the challenges caused by a flying eavesdropper that exploits UAV LOS communication. This work focuses on jointly optimizing the UAV's trajectory, transmit power control, and user scheduling to maximize the minimum average secrecy rate, which enables dynamic adjustments to ensure secure communication even against an mobile eavesdropper.", + "bbox": [ + 501, + 371, + 921, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Compared to the anti-eavesdropping strategies in [57] that rely heavily on accurate trajectory optimization and resource allocation, the studies in [58], [59] propose using a jammer to actively emit jamming signals, effectively reducing the interception capability of flying eavesdroppers during the computational offloading process of relay UAVs, as shown in Fig. 3. Meanwhile, with the support of SCA and BCD methods similar to [56], the joint optimization problem of UAV trajectories, resource allocation (including transmit power, time slot allocation, and computation capacity), and jamming strategies can be solved while ensuring practical constraints such as flight speed and anti-collision requirements. 
Importantly, compared to systems targeting fixed ground eavesdroppers, the works in [58], [59] enhance secure calculation capacity or secrecy rate by modeling the trajectories of both the relay UAV and the mobile eavesdropper as dynamic variables optimized over discrete time slots. Specifically, simulation results in [58] demonstrate that the secure calculation capacity of the proposed scheme converges to approximately 2.78 Mbps within 4 iterations, which is significantly higher than the baseline strategy (where only the location of the relay UAV, transmit power, and jamming power are optimized) by approximately 1.6 Mbps.", + "bbox": [ + 501, + 537, + 921, + 883 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lesson Learned. Convex optimization has emerged as a fundamental tool for developing anti-eavesdropping strategies in UAV-enabled communication systems, particularly for addressing the inherent non-convexity of joint trajectory and", + "bbox": [ + 503, + 883, + 923, + 946 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg", + "image_caption": [ + "Part A. DDQN-based Scheme" + ], + "image_footnote": [], + "bbox": [ + 84, + 89, + 562, + 224 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg", + "image_caption": [ + "Part B. DDPG-based Scheme" + ], + "image_footnote": [], + "bbox": [ + 593, + 92, + 903, + 220 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg", + "image_caption": [ + "Part D. 
MAPPO-LSTM-based Scheme" + ], + "image_footnote": [], + "bbox": [ + 86, + 257, + 544, + 386 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg", + "image_caption": [ + "Part C. MADDPG-based Scheme", + "Fig. 4. The overall architecture of the RL for anti-eavesdropping. Part A describes the DDQN-based scheme, where the system state is used to generate actions through the DDQN network, followed by action execution and obtaining the next state and reward. An experience replay mechanism is employed to store and randomly sample training data. Part B presents the DDPG-based scheme, where actions are generated through Actor and Critic networks, interacting with the environment to obtain rewards. An experience replay buffer is used to store and sample mini-batches. Part C describes the MADDPG-based scheme, involving multiple UAV agents, each with its own Actor and Critic networks, interacting with the environment and sharing rewards. Part D showcases the MAPPO-LSTM-based scheme, where Actor and Critic networks with LSTM layers process time-series data and train through an experience replay buffer." + ], + "image_footnote": [], + "bbox": [ + 594, + 253, + 903, + 383 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "resource allocation problems. For fixed eavesdroppers, simpler optimization models with fewer dynamic variables (e.g., 2D trajectory optimization) can achieve secure communication effectively. However, mobile eavesdroppers require more sophisticated formulations, including 3D trajectory optimization and robust constraints to account for uncertainties in eavesdropper positions. Another important insight is the adaptability of convex optimization when combined with complementary methods like artificial noise jamming and resource allocation strategies. 
By leveraging convex optimization, systems can balance secrecy performance with energy efficiency, ensuring practical applicability in real-world UAV operations. Techniques such as SCA and BCD have proven highly effective in decoupling complex optimization problems into solvable subproblems, allowing iterative refinement toward locally optimal solutions. Overall, convex optimization offers a flexible and mathematically rigorous approach to securing UAV-enabled communication systems for anti-eavesdropping.", + "bbox": [ + 73, + 503, + 491, + 775 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As the number of ground devices increases, along with UAV flight time and the number of optimization variables, the computational complexity of conventional algorithms grows exponentially, leading to infeasibility or suboptimal solutions [140], [141]. Moreover, these methods struggle to adapt to real-time scenarios where UAVs must communicate with mobile users and operate in environments with uncertain or partial information [140], [142]. RL enables UAVs to interact with the environment and autonomously learn optimal policies based on real-time observations [143], as shown in Fig. 4. By leveraging Deep RL (DRL), UAVs can efficiently", + "bbox": [ + 73, + 777, + 491, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "adapt to changing eavesdropping conditions, optimize secure trajectories, and dynamically allocate resources [144], [145]. This learning-driven approach significantly enhances PLS by ensuring adaptive, scalable, and intelligent anti-eavesdropping strategies in UAV communication networks.", + "bbox": [ + 501, + 503, + 919, + 579 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The study in [60] proposes a Deep Q-Network (DQN)-based approach to address the challenge of securing UAV-assisted multi-user wireless communications against passive eavesdropping attacks. 
The UAV trajectory optimization is formulated as a Markov Decision Process (MDP), where the state space includes the UAV's 3D coordinates and the positions of users. The action space consists of discrete movements in the $x$ , $y$ , and $z$ directions, with each action representing a step change in position. The reward function is designed to maximize the legitimate users' rates, defined as the sum of the channel capacities of users served by the UAV. Unlike many prior works that assume perfect knowledge of eavesdropper CSI [53], [59], this study focuses on optimizing legitimate user rates and using the DQN-based approach without requiring full knowledge of the eavesdropping channels. The DQN iteratively optimizes the UAV's trajectory, beamforming matrix, and transmit power allocation, ensuring the UAV dynamically adjusts its position to maximize secrecy capacity. Numerical results show that the secrecy capacity improves with the number of users. The proposed method converges an order of magnitude faster than the Q-learning method and achieves around $35\\%$ higher secrecy capacity than Q-learning after 20,000 episodes.", + "bbox": [ + 501, + 580, + 921, + 928 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "However, the DQN method may face the issue of Q-", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/736a80a4e6b5887a9f24b64146c7c6443017316dd655d4bae306fadd65bc5feb.jpg", + "table_caption": [ + "TABLE V SUMMARY OF RL FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
TechniquesReferenceAlgorithmPros & Cons
Value-based RL[60]DQN○ DQN algorithm for UAV trajectory optimization to maximize the secrecy capacity\n✓ Low computational complexity, making it easy to train\n× Q-value overestimation, leading to suboptimal action selection
[61]DDQN○ DDQN-based joint trajectory, time allocation, and offloading optimization\n✓ Accelerated convergence via action space pruning\n✓ Real-time optimization of trajectory and resources\n× DDQN is restricted to discrete action spaces\n× DDQN is not suitable for continuous action control
Policy Gradient-based RL[62]CAA-MADDPG○ Multi-Agent DRL with attention mechanisms (CAA-MADDPG) for secrecy rate maximization\n✓ Handle complex multi-agent with the attention mechanism\n× Assume prior knowledge of eavesdropper locations\n× Assume ground devices are static
[63]DDPG○ DDPG-based RL for enhancing bi-directional UAV communication security\n✓ Support mobile devices and ensure bi-directional secureit\n× Computational overhead increases with device density\n× performance may be sensitive to hyperparameter selection
[64]PPO+DCCN○ Hybrid DCCN and PPO for secrecy rate maximization\n✓ The PPO optimizes the UAV trajectory based on the results from DCCN\n× The performance is sensitive to the choice of clipping factor in PPO
[65]MAPPO+LSTM○ MAPPO for multi-agent cooperative anti-eavesdropping and LSTM-enhanced sequential learning\n✓ The MAPPO+LSTM improves the learning capability of sequential sample data\n× Assume perfect knowledge of CSI may be challenging in real-world scenarios
", + "bbox": [ + 76, + 116, + 926, + 380 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "value overestimation, leading to suboptimal action selection [146]. The authors in [61] propose a double-deep Q-learning (DDQN)-based scheme to jointly optimize the UAV trajectory, time allocation, and offloading decision strategy, aiming to maximize the average secure computing capacity for antieavesdropping in UAV-aided MEC. The system model involves one legitimate UAV server, one illegitimate UAV eavesdropper, one ground jammer, and ground users. The proposed DDQN-based scheme models the optimization problem as an MDP with states, actions, and rewards. The states include the coordinates of the UAVs, while the actions involve offloading decisions, time allocation, and trajectory adjustments. The reward function maximizes secure computing capacity. The DDQN model includes a deep Q-network (QN) and a deep target network (TN) to generate actions and evaluate their values. The parameters of the QN are updated by minimizing the loss function, and the parameters of the TN are periodically updated. The proposed scheme reduces the action space size by deleting illegal actions, such as those that violate time allocation constraints or result in resource waste. Unlike prior works [57], [60] that rely on conventional optimization or DQN with limited consideration of task queues, this approach integrates real-time resource allocation and trajectory optimization while ensuring dynamic constraints. The proposed DDQN scheme converges in 2000 episodes, half the episodes required by DQN (4000 episodes), and achieves a 0.02 Mbits higher average secure computing capacity.", + "bbox": [ + 73, + 407, + 491, + 815 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The value-based RL method (e.g., DQN) mainly focuses on dealing with discrete action spaces that may lead to the loss of optimal solutions [147]. 
The policy gradient-based RL method (e.g., Deep Deterministic Policy Gradient (DDPG)) can handle continuous action spaces [148], which are more suitable for UAV trajectory and transmit power optimization problems.", + "bbox": [ + 73, + 819, + 491, + 911 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The authors in [62] propose a multi-agent DRL framework to address the challenge of secure UAV communications in", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "the presence of eavesdroppers. The system model is similar to Part A of Fig. 3, where the UAV server sends confidential information to ground users, and UAV jammers send AN signals to ground eavesdroppers using 3D beamforming. This study designs the Multi-Agent Deep Deterministic Policy Gradient with a continuous action attention mechanism (CAA-MADDPG) to maximize the system's secrecy rate. The attention mechanism dynamically prioritizes relevant agents' observations (e.g., jammers focusing on eavesdroppers) to reduce the exploration space and accelerate convergence, thereby enhancing the system's ability to counteract eavesdropping attempts. The simulation results show that CAA-MADDPG achieves a secure rate of $4.5\\mathrm{bps / Hz}$ and converges in 1000 episodes with three UAV jammers, outperforming MADDPG (around $4\\mathrm{bps / Hz}$ and 1400 episodes) and DDPG schemes.", + "bbox": [ + 501, + 407, + 921, + 633 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "However, the study in [62] just considers UAV-to-ground communication (U2G) and assumed the ground devices are static. The work in [63] addresses the challenge of securing bi-directional ground-UAV communications in a dynamic environment with mobile ground devices and eavesdroppers. Different from prior works that assume static ground eavesdroppers [54], [56], this study considers mobile ground eavesdroppers for more practical real-world scenarios. 
The communication in U2G and ground-to-UAV (G2U) cases is modeled, considering factors such as channel gains and distances. The problem of maximizing the worst-case average secrecy rate is formulated as a constrained MDP (CMDP) under the constraints of UAV flight space, flight speed, energy capacity, anti-collision, and peak transmit power. To solve the CMDP, the authors design a DDPG-based RL algorithm. The algorithm includes three key components: the primary network (actor and critic networks), the target network, and the replay buffer. They also adopt state normalization and exploration noise to speed up the training convergence of the DDPG. The proposed joint optimization scheme achieves a secrecy rate", + "bbox": [ + 501, + 642, + 921, + 945 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "over $40\\%$ higher compared to baselines that optimize only trajectory or only power. In addition, DDPG outperforms DQN by approximately $15\\%$ in secrecy rate due to its ability to handle continuous actions.", + "bbox": [ + 78, + 69, + 488, + 128 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The DDPGG methods form a fixed mapping from states to actions, which is not suitable for large state spaces that require more exploration and uncertainty [149]. The PPO alleviates this limitation by introducing proximal policy optimization, which allows for more exploration in the large action space.", + "bbox": [ + 78, + 128, + 488, + 204 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The study in [64] proposes a hybrid framework (Double Cascade Correlation Network (DCCN) + PPO) to maximize the secrecy capacity. DCCN bypasses the need for labeled training data by cascading two neural networks to maximize the secrecy channel rate. 
The PPO dynamically adjusts the UAV's position by using clipped surrogate objectives to stabilize policy updates and advantage estimation to prioritize high-reward actions. Simulation results show that the proposed scheme (DCCN + PPO) achieves an average secrecy rate of 0.73 bps/Hz, outperforming the benchmarks DCCN + DDPG (0.67 bps/Hz) and Random RIS + PPO (0.13 bps/Hz). However, the average secrecy continues to decline when the transmit power is higher than 2 W, since the jamming signals transmitted by the secondary source against the eavesdropper will also affect the primary users.", + "bbox": [ + 78, + 205, + 488, + 430 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The study in [64] considered only the use of one UAV to assist in secure communication. However, in low-altitude economic networks, it may be more important for multiple UAVs to collaborate to implement anti-eavesdropping strategies. The study in [65] considers a system model treats all secondary transmitters and multiple UAV jammers as multi-agents. A Multi-Agent PPO algorithm combined with Long Short-Term Memory (LSTM) networks, named MAPPO-LSTM, is proposed to maximize the secure communication rate by jointly optimizing the UAV trajectory, transmission power, and energy harvesting coefficients. The problem is formulated as a nonconvex MDP consisting of an action space, state space, observation, and reward (which consists of the sum of the secure communication rate, SINR information, and battery capacity). The MAPPO algorithm introduces counterfactual baselines to address the credit assignment problem in centralized learning and combines with the LSTM network to enhance the learning capability of sequential sample data. Compared to benchmark schemes MAPPO and MADDPG, the proposed MAPPO-LSTM method achieved around $17\\% - 20\\%$ higher average secrecy rate in large-scale scenarios, with convergence speeds 1.37 times and 1.93 times faster, respectively. 
In addition, the reward is sensitive to the discount factor, where setting factor to 0.99 enables faster and more stable convergence. Deviations from this value result in more pronounced fluctuations in the reward and secrecy rate.", + "bbox": [ + 78, + 431, + 488, + 821 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Lesson Learned. The RL has emerged as a powerful yet challenging tool for anti-eavesdropping in UAV-assisted secure communications. A key lesson is that multi-agent cooperation significantly enhances security compared to single-agent approaches, enabling adaptive trajectory control, power allocation, and jamming coordination in dynamic environments. However, scalability and convergence efficiency remain critical bottlenecks, especially in high-dimensional, time-", + "bbox": [ + 78, + 824, + 488, + 943 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "varying settings, as many studies unrealistically assume perfect channel information, and deep RL's convergence time leaves the system vulnerable before optimization completes. Furthermore, key limitations demand further attention, such as RL's computational complexity, which restricts its use in resource-limited settings requiring real-time security, and its sensitivity to hyperparameter tuning that requires meticulous configuration to ensure optimal performance. 
Future advancements should focus on developing generalizable and robust learning architectures that can dynamically adapt to evolving threats while maintaining computational feasibility, addressing practical deployment challenges, exploring hybrid approaches, prioritizing security in system design, and balancing security performance with energy consumption.", + "bbox": [ + 508, + 69, + 918, + 280 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Deep learning, with its strong learning capabilities, parallel processing, and comprehensive reasoning [150]–[152], has huge potential to enhance anti-eavesdropping strategies in UAV communications, especially in environments with rapidly changing conditions and complex interactions [153]. Given the intricate problem of UAV trajectory variation and its nonlinear characteristics in time and space [154], [155], deep learning networks, such as neural networks and generative models, are emerging as potential solutions to improve the security and performance of UAV communication systems.", + "bbox": [ + 508, + 281, + 918, + 430 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The study in [66] explores the use of deep learning to optimize UAV deployment and jamming strategies against eavesdroppers to maximize the secrecy rate in the complete CSI scenario. The optimization problem is decomposed into two layers: the inner layer optimizes jamming power for a fixed UAV location, and the outer layer optimizes UAV deployment. The inner problem is solved using a bi-section search algorithm, while the outer problem is addressed using a deep neural network (DNN) to approximate the optimal UAV deployment. The DNN is designed as a fully connected structure, which includes an input layer, two hidden layers, and an output layer, as shown in part A of Fig. 5. The DNN is trained using a dataset generated by simulating different UAV deployments and corresponding secrecy rates. 
The final optimal deployment of the UAV can be approximated when the mean square error of weights between neurons is minimized. The DNN model achieves an average distance error of 2.2 meters compared to the optimal deployment found by the exhaustive search baseline.", + "bbox": [ + 508, + 431, + 918, + 715 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The fully connected neural network used in [66] is suited for problems where inputs and outputs are fixed-dimensional vectors without inherent spatial or sequential relationships [150]. Moreover, convolutional neural networks (CNNs) and recurrent neural networks (RNNs) can also contribute to antieavesdropping. In contrast to fully connected networks, CNNs are particularly effective for exploring spatial features from images or spatial maps [156]. RNNs, on the other hand, focus on handling sequential data by maintaining a memory of previous inputs through recurrent connections [150]. The authors in [67] propose a CNN-LSTM-based secure efficiency map (SEM) framework, which is constructed by calculating each subarea's security-efficiency index using a weighted exponential coefficient to combine normalized secure spectrum efficiency (secrecy rate per unit bandwidth) and secure energy", + "bbox": [ + 508, + 718, + 918, + 943 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 31, + 919, + 39 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/fce984a856adf58da26013e00ddd120d464b283587d49890dccfd1c7c26d073d.jpg", + "table_caption": [ + "TABLE VI SUMMARY OF DEEP LEARNING FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
TechniquesReferenceAlgorithmPros & Cons
Neural network model[66]DNN● Use DNN to optimize UAV deployment and jamming strategies for secure communication\n● The DNN model reduces the complexity of exhaustive searches\n● Rely on complete statistical channel knowledge\n● require intensive resources to generate a large amount of training data
[67]CNN-LSTM● CNN-LSTM-based SEM prediction for dynamic secure UAV trajectory optimization\n● Efficient spatiotemporal feature extraction via CNN-LSTM\n● Assume fixed UAV height and passive eavesdropper\n● Training CNN-LSTM network requires a substantial amount of data
[68]FL-DNN● FL-DNN-RL integration (FairLearn) for maximizing fairness in secrecy rates\n● Collaborative learning via FL improves generalization in anti-eavesdropping strategies\n● Involving multiple learning mechanisms requires significant computational resources\n● Assuming perfect CSI and eavesdropper localization may be impractical
Generative AI model[69]MD-GAN● MD-GAN with unknown CSI as model weights\n● Adapt to dynamic environments via gradient-based training\n● Do not require knowledge of the eavesdropper's detection threshold\n● Training a GAN can be computationally intensive
[70]DD-GAN● DD-GAN uses genetic algorithm-generated datasets for GAN training\n● Achieve an effective trade-off between covert rate and detection probability\n● Training relies on the quality and quantity of the genetic algorithm-generated data
[71]GDMTD3● GDMTD3 integrates generative diffusion models into TD3\n● Handle high-dimensional action spaces to adapt mobile eavesdroppers\n● Computational complexity from diffusion denoising steps
", + "bbox": [ + 104, + 116, + 893, + 381 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "efficiency (secrecy rate per unit power). Historical SEMs are fed into a CNN-LSTM network to predict future SEMs by leveraging spatial-temporal feature extraction and time-series correlation. Based on predicted SEMs, a trajectory planning algorithm dynamically guides the UAV to subareas with the highest security-efficiency indices. The proposed SEM-enabled trajectory planning achieves an average security-efficiency index of 0.81, outperforming baseline schemes (e.g., static trajectory [142] or non-predictive methods [62], [157]) by over $30\\%$ .", + "bbox": [ + 73, + 407, + 491, + 556 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Previous deep learning-based architectures [66], [67] are centralized, lacking collaboration and knowledge sharing among UAVs, while also facing challenges in privacy preservation and scalability. To address these limitations and optimize secrecy rate maximization under constraints such as UAV mobility, power budgets, and scheduling fairness, the authors in [68] propose a federated learning (FL)-based framework (FairLearn). As shown in part B of Fig. 5, the FairLearn employs three learning modules: (1) Module-D uses RL to dynamically generate training datasets by exploring UAV trajectories, power allocation, and scheduling policies; (2) Module-P employs a DNN trained on these datasets to predict optimal 3D trajectory, transmit power, and user scheduling, maximizing proportional fairness in secrecy rates (defined as the difference between legitimate UAV-user rates and eavesdropper rates); (3) Module-C applies FL to aggregate DNN models across UAVs, enabling collaborative learning while preserving data privacy. Simulation results show that FairLearn's secrecy rate is $26.6\\%$ higher than BCD at 1.4W transmit power. 
After 100s of execution, FairLearn achieves $14.34\\%$ , $24.56\\%$ , and $108\\%$ higher secrecy rates than BCD, MAQ, and QCQP baselines, respectively.", + "bbox": [ + 73, + 561, + 491, + 895 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "It is worth noting that UAVs can only obtain limited prior environmental information without knowing perfect channel information and the eavesdropper's detection threshold or ex", + "bbox": [ + 73, + 898, + 491, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "act location. Some previous methods [59], [65], [68] may find it difficult to solve the optimization problem in such scenarios. In contrast, the generative adversarial network (GAN) has emerged as a new model for solving optimization problems with limited prior information [158], [159]. GAN can effectively model and approximate unknown distributions (such as channel coefficients, detection thresholds, and environmental parameters) through adversarial learning, where the generator continuously improves its strategy by learning from the feedback from the discriminator [158].", + "bbox": [ + 501, + 407, + 921, + 556 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The work in [69] addresses challenges related to partial channel distribution information and unknown eavesdropper detection thresholds by proposing a model-driven GAN (MDGAN) framework. The unknown channel coefficients and detection thresholds are treated as trainable weights in the MD-GAN. The MD-GAN transforms the joint trajectory and power optimization problem into a dynamic game between a generator (UAV) and a discriminator (eavesdropper), where the UAV acts as a jammer to protect secondary users from eavesdroppers. The generator optimizes the UAV's 3D trajectory and jamming power, while the discriminator evaluates detection errors. 
Then, a GAN-based joint trajectory and power optimization (GAN-JTP) algorithm is developed to achieve Nash equilibrium (i.e., maximizing the covert rate and the probability of detection errors). As shown in part C of Fig. 5, the GAN-JTP algorithm consists of two stages: network learning and network training. In the network learning stage, the generator optimizes the UAV's trajectory and transmit power based on the current state and environment. In the network training stage, the generator and discriminator are alternately trained using gradient backpropagation to update their weights. Simulation results show that increasing the training of the discriminator accelerates the convergence of the generator (e.g., when the training step is 10, convergence is achieved within 30 iterations, compared to 89 iterations when", + "bbox": [ + 501, + 566, + 921, + 946 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg", + "image_caption": [ + "Part A. DNN-based Architecture" + ], + "image_footnote": [], + "bbox": [ + 83, + 87, + 406, + 174 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg", + "image_caption": [ + "Part B. FL-DNN-based Architecture" + ], + "image_footnote": [], + "bbox": [ + 418, + 87, + 910, + 172 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg", + "image_caption": [ + "Part C. MD-GAN-based Architecture" + ], + "image_footnote": [], + "bbox": [ + 83, + 196, + 406, + 398 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg", + "image_caption": [ + "Part D. 
DD-GAN-based Architecture" + ], + "image_footnote": [], + "bbox": [ + 421, + 196, + 910, + 286 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg", + "image_caption": [ + "Part E. GDMTD3-based Architecture", + "Fig. 5. The overall architecture illustrates various deep learning-based architectures designed to enhance anti-eavesdropping capabilities in UAV deployment scenarios. Part A presents a DNN-based architecture that processes air-ground and ground-ground channel states to determine UAV deployment. Part B shows an FL-DNN-based architecture, incorporating modules for reinforcement learning, DNN-based feature mapping, and FL. Part C depicts an MD-GAN-based architecture, where a generator produces trajectories and power outputs based on location and environment status, while a discriminator evaluates the decisions. Part D introduces a DD-GAN-based architecture, focusing on generating jamming solutions to maximize covert rates, with a discriminator distinguishing between jamming and non-jamming solutions. Part E illustrates a GDMTD3-based architecture, utilizing an experience replay buffer and diffusion reverse process to optimize UAV deployment strategies." + ], + "image_footnote": [], + "bbox": [ + 421, + 309, + 910, + 398 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "the training step is 1). For a flight period of 100 seconds, the GAN-JTP algorithm achieves a $0.47\\%$ increase in covert rate with a $0.15\\%$ reduction in detection error probability compared to the BCD-based scheme [160].", + "bbox": [ + 73, + 522, + 491, + 580 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The MD-GAN [69] relies on model-driven methods where the unknown channel information and detection threshold are treated as trained weights. 
Differently, the authors in [70] propose a data-driven GAN (DD-GAN) framework that focuses on generating data consisting of environmental parameters and optimal solutions to train the GAN. Specifically, the DD-GAN transforms the optimization process into an interactive game between the UAV and eavesdropper, where the UAV aims to maximize the covert rate, and the eavesdropper aims to detect the presence of covert communication. To address the non-convexity and lack of eavesdropper detection threshold information in the optimization process, the DD-GAN trains a generator (UAV) and discriminator (eavesdropper) adversarially, using genetic algorithm-generated samples as prior data, as shown in part D of Fig. 5. The generator produces power and trajectory solutions, while the discriminator evaluates the detectability. The loss function of the discriminator is designed to maximize the probability of correctly identifying real data and minimize the probability of being fooled by generated data. The generator's loss function aims to maximize the probability that the generated data is mistaken for real data by the discriminator.", + "bbox": [ + 73, + 582, + 491, + 912 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Besides GANs [69], [70], another generative model, the diffusion model, has advanced the effective representation", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "of multi-dimensional data distributions [161]. The diffusion model can better capture the complex dynamics and the tradeoff in the multi-objective optimization problem concerning secure communication [112]. For example, The diffusion model captures complex state-action distributions, enabling adaptive beamforming and UAV repositioning under eavesdropper mobility. 
To tackle dynamic environments and high-dimensional action spaces in secure communication and energy efficiency multi-objective optimization problem, the authors in [71] propose GDMTD3, a Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm enhanced with generative diffusion models. Key innovations include integrating diffusion-based reverse processes into the actor network for robust policy generation and addressing continuous action spaces, as shown in part E in Fig. 5. The training process of GDMTD3 involves initializing the online critic and actor networks, interacting with the environment, and updating the network parameters based on the collected experiences. The actor network uses a generative diffusion model to sample actions, while the critic networks evaluate the actions using twin critic networks to reduce overestimation bias. Simulation results show that GDMTD3 outperforms DRL-based benchmarks (including PPO, TD3, and DDPG), achieving about $50\\%$ higher cumulative rewards and around $21\\%$ higher average secrecy rate than TD3. In addition, when the number of UAVs increases from 4 to 8, the average secrecy rate increases accordingly. However, increasing the number of UAVs from 8 to 16 raises", + "bbox": [ + 501, + 522, + 921, + 931 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/a098c7255a5103f909ef1e57c6ad04b2fe83cf8cff00c95130989f6352c468f0.jpg", + "table_caption": [ + "TABLE VII SUMMARY OF AUTHENTICATION FOR COMMUNICATION CONFIDENTIALITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." 
+ ], + "table_footnote": [ + "Physical Unclonable Functions (PUFs) are hardware-based security primitives that exploit inherent and unique physical variations in devices to generate unclonable and unpredictable responses for communication authentication.", + "2Authentication Distance (AD) is a metric proposed in [77] to distinguish legitimate and illegitimate signals for communication authentication." + ], + "table_body": "
TechniquesReferenceAlgorithmPros & Cons
PUFs-based authentication[72]PUF\\(s^1\\)PUF-based dynamic session key generation and mutual authentication protocol\n✓ Lightweight design with no stored secrets\n✗ Potential overhead during temporary identity updates
[73]PUF-fuzzy extractorPUF-fuzzy extractor mutual authentication with TS-based dynamic session adaptation\n✓ Dynamic session time adaptation minimizes idle periods and enhancing security\n✗ Higher computational cost due to fuzzy extractors
[74]PUFs-fuzzy extractor-AEGISIntegration of PUFs, fuzzy extractors, and AEGIS for mutual authentication\n✓ The proposed password/biometric update mechanism reduces server dependency\n✗ Multiple cryptographic operations and protocols may cause delay in the implementation
Channel based authentication[75]Rician channelPhysical-layer fingerprinting authentication based on Rician channel characteristics\n✓ Optimal power allocation balances data, AN, and authentication tag transmission\n✗ Reliance on encrypted tags requires high demand on UAV processing capabilities
[76]Rayleigh channelSNR difference-based PLA scheme\n✓ The SNR-based design can be implemented without additional hardware infrastructure\n✗ The simplified Rayleigh channel model may limit to real-world propagation environments
[77]Rayleigh/Rician channelsAD metric2for authentication under Rayleigh/Rician channels\n✓ AD metric-based method improves the detection accuracy of authentication\n✓ Detailed analysis of authentication performance under different propagation conditions\n✗ Computational complexity in Rician channels due to hypergeometric functions
",
+ "bbox": [
+ 83,
+ 116,
+ 913,
+ 347
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "text",
+ "text": "energy consumption but only marginally improves secrecy rates, highlighting a performance-energy trade-off.",
+ "bbox": [
+ 73,
+ 407,
+ 491,
+ 436
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "text",
+ "text": "Lesson Learned. A key lesson learned is that deep learning, particularly through advanced architectures such as GANs [69], [70] and diffusion models [71], can address complex, dynamic environments with partial channel state information and unknown eavesdropper locations, while demonstrating superior performance over traditional methods [59], [65], [66]. These approaches demonstrate that deep learning not only strengthens the resilience of secure communications but also enables autonomous, real-time decision-making to counteract evolving eavesdropping threats in UAV networks.",
+ "bbox": [
+ 73,
+ 436,
+ 490,
+ 588
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "text",
+ "text": "B. Communication Authentication",
+ "text_level": 1,
+ "bbox": [
+ 73,
+ 608,
+ 308,
+ 622
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "text",
+ "text": "In the LAENet, as UAVs operate in open environments and rely on wireless communication, they are highly vulnerable to security threats such as node capture and man-in-the-middle attacks [46]. Ensuring secure and reliable authentication between UAVs and ground stations/users or among UAVs is critical to preventing unauthorized access [52], [162]. Traditional cryptographic authentication schemes often impose significant computational and memory overheads and incur considerable latency, making them unsuitable for resource-constrained UAVs [163]. 
Recently, advancements such as PUFs and Physical-layer Authentication (PLA) mechanisms have opened new possibilities for lightweight and effective authentication in the LAENet.", + "bbox": [ + 73, + 627, + 490, + 821 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PUFs are a class of hardware security primitives that leverage the inherent manufacturing variations (such as variations in circuit delay or RF properties) in semiconductor devices to generate unique and unpredictable responses [164]. When a specific input is applied to a PUF, the device generates a corresponding response, forming a challenge-response pair that is unique to this device [164]. Such uniqueness and unpredictability make PUFs highly resistant to cloning and", + "bbox": [ + 73, + 823, + 491, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "tampering, making them as a secure means for device authentication and key generation [165]. In addition, employing a PUF in a UAV allows for secure authentication without the need for complex cryptographic operations, making it an efficient solution for resource-constrained scenarios [166].", + "bbox": [ + 501, + 407, + 919, + 482 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The work in [72] proposes a lightweight mutual authentication protocol, named SecAuthUAV, for securing UAV-ground station and UAV-UAV communications. SecAuthUAV employs PUFs in each UAV to generate a unique, unclonable session key that functions as a non-reproducible fingerprint. The protocol consists of three phases, as shown in Fig. 6. During UAV registration, a challenge-response pair from the UAV's PUF is stored, and a temporary identity is generated. In the UAV-ground station authentication phase, the UAV and ground station authenticate each other using challenge-response pairs and nonces, establish a session key, and update their identities. 
Lastly, in the UAV-UAV authentication phase, the GS facilitates secure communication by authenticating a second UAV and generates a session key for both UAVs.", + "bbox": [ + 501, + 487, + 919, + 698 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "However, the work in [72] ignores the fact that the noise in PUFs can result in significant deviation in the output for the same input at different time points. In addition, [72] does not adjust the session time after establishing an authenticated session between two parties, which may lead to the active session remaining idle for a long time and thus give an opportunity for an adversary to interfere with the communication link. In light of this, the authors in [73] propose an UAV Authentication with Adaptive Session (UAAS) framework to address these challenges. Firstly, they combine PUFs and fuzzy extractors to address PUF noise. The fuzzy extractors consist of two phases: the $Gen(.)$ phase creates a key and non-sensitive helper data, and the $Rep(.)$ phase reconstructs the key from a noisy PUF response using the helper data while tolerating minor deviations. Then, the Thompson Sampling (TS)-based scheme is proposed to dynamically adapt the session time.", + "bbox": [ + 501, + 702, + 919, + 944 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg", + "image_caption": [ + "Fig. 6. The overall architecture of the PUF-based authentication schemes for UAV-GS and UAV-UAV communication in [72]. Part A illustrates the PUF-based authentication process between a UAV and a ground station (GS). The UAV sends its ID and a nonce to the GS, which responds with a hash value based on the PUF, UAV ID, and nonce. 
The UAV then sends a value derived from the PUF and another nonce, and the GS verifies the authentication by comparing hash values. Part B shows the PUF-based authentication between two UAVs (U1 and U2) through the GS. After establishing a session key with the GS, U1 requests a connection to U2. The GS facilitates the authentication by generating a new session key, which is securely shared between U1 and U2."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 80,
+ 66,
+ 916,
+ 308
+ ],
+ "page_idx": 14
+ },
+ {
+ "type": "text",
+ "text": "TS is a probabilistic approach that balances exploration and exploitation, determining the session time based on the fraction of busy time to minimize idle periods and reduce the risk of adversarial interference. Although the security analysis demonstrates that UAAS improves the security level in the mutual authentication mechanism, its throughput is $20.38\\%$ lower and computational cost is 126 ms higher than the baseline [72] due to security overhead.",
+ "bbox": [
+ 73,
+ 402,
+ 491,
+ 525
+ ],
+ "page_idx": 14
+ },
+ {
+ "type": "text",
+ "text": "In the LAENet, while establishing mutual authentication between UAVs and ground stations is critical, it is also important to incorporate role-specific access controls for users to ensure communication confidentiality and prevent unauthorized access [167]. The work in [74] proposes a PUF-enabled authentication framework for Internet of Drones (PAF-IoD) to establish mutual authentication among users, UAVs, and ground stations. Users need to authenticate with stations to access the stored data or communicate directly with UAVs, where the users' authentication mechanism includes three factors (identity, password, and biometric data). Similar to [73], PAF-IoD uses PUFs and a fuzzy extractor in the authentication process to generate a unique and tamper-proof session key while tolerating the noise in PUFs. 
Furthermore, the designed authenticated encryption with associative data (AEAD)-based encryption algorithm is utilized for encrypting and decrypting messages exchanged between the user, ground station server, and UAVs.", + "bbox": [ + 73, + 530, + 491, + 803 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In addition to leveraging intrinsic physical properties of hardware for authentication design through PUFs [72]–[74], the characteristics of communication channels can be used for authentication. The PLA mechanism authenticates devices by exploiting the unique physical characteristics of wireless communication channels, such as CSI, received signal strength (RSS), and signal-to-noise ratio (SNR) [168]. The main reason is that the wireless channel between two communicating entities exhibits location-specific and time-varying properties", + "bbox": [ + 73, + 808, + 491, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "due to multipath propagation, fading, and environmental factors [169]. These diverse physical channel conditions, which provide a robust set of features for authentication, have been investigated in terrestrial communication networks [168]–[170]. Furthermore, the source of received signals can be accurately and promptly detected [170], making PLA particularly advantageous in the dynamic and complex communication environments of the LAENet.", + "bbox": [ + 501, + 402, + 921, + 523 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The authors in [75] leverage the unique properties of the physical layer channel, Rician channel, to develop a PLA for UAV-ground station communication. Given that UAVs receive signals subject to the Rician fading model, the ground station integrates authentication directly into the transmission process. It employs a one-way collision-resistant function (e.g. 
cryptographic hash function) that combines data symbols with a shared secret key to generate a low-power authentication tag for UAV and seamlessly embeds it into the transmitted signal. The authentication tag is validated by the correlation shaped by the Rician statistical characteristics of the fading channel, i.e., the correlation between the estimated tag (derived from the received signal) and the expected tag (generated using the secret key and decoded data symbols).", + "bbox": [ + 503, + 530, + 921, + 743 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "However, the work in [75] still partially relies on cryptographic tag generation for authentication, which may not be suitable for UAVs with limited processing capabilities. The study in [76] leverages channel characteristics and geographical locations for PLA design, where the SNR differences between consecutive transmissions are utilized as the authentication metric. Specifically, a legitimate transmitter and a jammer have distinct channel variations due to differences in their geographical locations. The UAV authenticates the legitimate transmitter or jammer by formulating a binary hypothesis test based on the SNR difference between two successive transmissions. If the difference falls within a predefined threshold, the transmission is authenticated as from the legitimate", + "bbox": [ + 501, + 748, + 921, + 946 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg", + "image_caption": [ + "Part A. Channel-based authentication in urban" + ], + "image_footnote": [], + "bbox": [ + 127, + 99, + 444, + 305 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg", + "image_caption": [ + "Part B. 
Channel-based authentication in suburban", + "Fig. 7. The overall architecture of the channel-based authentication in urban and suburban environments in [77]. Part A depicts the authentication process in an urban environment under Rayleigh channel conditions. The UAV receiver calculates the SNR, computes the AD, and compares it with a detection threshold to distinguish between legitimate and malicious sensors. Part B illustrates the authentication process in a suburban environment, where the UAV receiver performs similar steps to authenticate legitimate sensors and detect malicious ones under Rayleigh channel conditions." + ], + "image_footnote": [], + "bbox": [ + 114, + 353, + 455, + 559 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "transmitter; otherwise, it is classified as a jammer. The closed-form expressions for the probability density function of SNR differences, false alarm rate (FAR), and miss detection rate (MDR) are derived under Rayleigh fading channels in single-UAV and dual-UAV scenarios. The non-convex optimization problem of minimizing MDR under FAR constraints is solved using an SCA algorithm, which outperforms the RSS-based baseline [90] by about $40\\%$ .", + "bbox": [ + 73, + 700, + 490, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "It is worth noting that study [75] may lack a comprehensive analysis of the UAV-PLA performance under different propagation conditions. Additionally, the detection performance may be further improved with other indicators. As shown in Fig. 7, the work in [77] proposes a novel PLA framework under different propagation conditions, including dense urban and suburban environments modeled by Rayleigh and Rician channels, respectively. A new metric, Authentication Distance", + "bbox": [ + 73, + 823, + 491, + 945 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "(AD), is proposed as the normalized difference in received SNR between adjacent transmissions. 
For Rayleigh channels, closed-form expressions for FAR and MDR are derived using convolution and integration-by-parts, while Rician channels employ doubly non-central $F$ distributions to model AD statistics. Similar to study [76], this authentication framework minimizes MDR under FAR constraints. In dense urban settings, MDR depends on path loss and transmitter-UAV geometry. For suburban environments, it incorporates elevation angle-dependent Rician factors and path loss exponents to improve discriminability between legitimate and illegitimate signals. The proposed AD-based method outperforms the SNR-difference baseline [171], achieving 40–60% lower MDR.", + "bbox": [ + 501, + 68, + 921, + 263 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Lesson Learned. Leveraging physical-layer characteristics, such as PUFs and channel properties, in conjunction with communication models and optimization algorithms, has proven effective in enhancing authentication accuracy and reducing detection errors. However, some methods also reveal limitations. For instance, the assumptions of ideal channel conditions and the neglect of practical implementation constraints may limit the applicability of the proposed solutions [76], [77]. Future research should focus on addressing these limitations by exploring more practical channel models and considering the trade-offs between security and system complexity.", + "bbox": [ + 501, + 265, + 921, + 431 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "IV. COMMUNICATION AVAILABILITY FOR LAENET", + "text_level": 1, + "bbox": [ + 529, + 450, + 895, + 465 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A. 
Anti-Jamming Strategy", + "text_level": 1, + "bbox": [ + 503, + 470, + 684, + 486 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Jamming attacks pose significant challenges to communication availability in the LAENet by disrupting legitimate communication links and degrading the performance of aircraft communication networks [79], [172]. As shown in Fig. 10, these attacks can exploit the openness and broadcasting nature of UAV networks, making them particularly vulnerable to interference [79]. Malicious jammers can transmit strong signals that weaken signal strength, degrade signal quality, and increase communication delays, leading to unreliable coverage and potential paralysis of the entire network [172], [173]. This vulnerability underscores the urgent need for effective anti-jamming technologies to ensure reliable communication in the LAENet.", + "bbox": [ + 501, + 491, + 921, + 685 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Various anti-jamming strategies have been explored to safeguard the LAENet against malicious jamming, mainly focusing on trajectory adjustment, as well as channel and power control. Overall, by adjusting the trajectory in the spatial domain, an UAV can evade jamming signals while maintaining reliable communication with legitimate devices [80], [173]. Besides the spatial-domain anti-jamming strategy, the UAV can implement a frequency-domain-based anti-jamming scheme. The UAV can select legitimate channels while avoiding jamming signals and control transmit power to minimize energy consumption and latency under jamming attacks [83], [84].", + "bbox": [ + 501, + 686, + 921, + 868 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Convex optimization methods can be used to adjust the UAV's trajectory to achieve anti-jamming by strategically guiding its movement to reduce interference and enhance communication reliability [80], [173]. 
It provides a systematic and efficient approach to handle the complex, non-convex", + "bbox": [ + 501, + 869, + 921, + 945 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/ac755b49caad673c5969aa85578ccfc3367cd5587e9664d857f416a9905f81b7.jpg", + "table_caption": [ + "TABLE VIII SUMMARY OF ANTI-JAMMING STRATEGY FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
TechniquesReferenceAlgorithmPros & Cons
Convex optimization[80]BCD, SCABCD and SCA for UAV 3D trajectory optimization for anti-jamming\n✓ Probabilistic LoS performs well in real-world scenarios such as urban environments\nX High computational complexity may be challenging in resource-constrained environments
[81]SCA, DinkelbachSCA and Dinkelbach algorithm for energy-efficient trajectory optimization under malicious jammers\n✓ Balance between throughput and energy consumption in anti-jamming\nX Assume static and LoS-dominated channels
[82]BCD, SCABCD and SCA for joint UAV trajectory and transmit power optimization under jamming\n✓ Improve throughput by considering transmit power optimization against jammers\nX Assume a fixed UAV altitude and a static channel environment
Multi-agent RL[87]MALQLCollaborative MALQL algorithm for anti-jamming with channel and power allocation\n✓ Accelerate convergence compared to single-agent Q-learning\nX Assume predefined UAV trajectories limits to adaptability
[88]MARLMARL with adversarial pre-training for dynamic and generalized jamming\n✓ Generalize to various jamming patterns via adversarial populations for pre-training\nX Pre-training for generalized jamming may require significant offline resources
[89]MATD3MATD3 algorithm with PER for dynamic resource management under jamming attacks\n✓ Handle high-dimensional continuous action spaces\nX The integration of PER and spectrum sensing may increase the computational complexity
", + "bbox": [ + 88, + 116, + 908, + 335 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "problems that arise when optimizing UAV trajectories and various constraints under malicious jamming conditions [131]. The work in [80] investigates anti-jamming 3D trajectory design for UAV-enabled wireless sensor networks under a probabilistic LoS channel model. The probabilistic LoS model accounts for elevation angle-dependent shadowing effects in urban environments compared with simplified LoS models. The BCD and SCA algorithms are employed to optimize the UAV's horizontal and vertical trajectory, allowing the UAV to move closer to the ground station for improved transmission rates while dynamically adjusting its elevation angle relative to the jammer to mitigate interference.", + "bbox": [ + 73, + 361, + 491, + 542 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "However, the anti-jamming trajectory optimization in [80] under the probabilistic LoS model does not consider the energy consumption issue. The study in [81] utilizes SCA and Dinkelbach's algorithm to adjust the UAV's trajectory to avoid areas with jammers while maximizing energy efficiency, which is defined as the ratio of total throughput to propulsion energy consumption during flight. Compared to hovering-centric benchmarks, the optimized trajectory reduced energy consumption by $82\\%$ while maintaining $73.16\\%$ of the sum throughput. It is worth noting that the transmit power of the UAV and station is fixed in [81], whereas power optimization is also an important factor for energy efficiency. The authors in [82] use the SCA and BCD algorithms to maximize throughput by iteratively optimizing power allocation (via convex reformulation of throughput bounds) and UAV trajectory (via slack variables for distance constraints and jamming mitigation) to avoid jamming signals. 
The proposed scheme achieves $40\\%$ higher throughput compared to the \"Line trajectory with fixed power\" baseline.", + "bbox": [ + 73, + 547, + 491, + 834 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "While convex optimization methods [80]–[82] work well for fixed jamming patterns, they may struggle to handle dynamic, intelligent jamming [174] in real-time due to their reliance on global information and the challenges inherent in solving nonconvex problems with increased optimized variables [142]. In contrast, RL and DRL offer significant advantages by enabling autonomous, adaptive decision-making [143], [147]. These", + "bbox": [ + 73, + 838, + 491, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "approaches can continuously adjust to environmental changes, learn from past interactions, and optimize performance in real-time [144], [175]. The RL-based anti-jamming methods have emerged as a promising solution due to their ability to operate without excessive prior information (such as unknown environment, CSI, and jamming mode) [147]. Single-agent RL algorithms have been used in previous works to develop anti-jamming strategies in communication networks by regarding jammers and other legitimate users as part of the environment, including independent anti-jamming channel selection methods [83]–[86]. However, these single-agent approaches may fail to converge when dealing with a large number of agents or a high-dimensional action-state space [87], making them impractical for complex, multi-agent scenarios in the LAENet. 
To address these limitations, multi-agent RL (MARL) methods have been proposed to allow each agent to make decisions based on local information and exchange data with others (such as observations or model parameters).", + "bbox": [ + 501, + 361, + 921, + 633 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The study in [87] proposes a collaborative multiagent layered Q-learning (MALQL) algorithm for anti-jamming communication in UAV networks by jointly optimizing channel and power allocation to maximize system Quality of Experience (QoE). The problem is modeled as a local interaction Markov game based on the constructed interference graph. The MALQL divides the problem into two subgames of channel selection (Layer 1) and power allocation (Layer 2), as shown in part B of Fig. 10. The channel layer uses a graph-based interference model to capture mutual interference among UAVs. Each UAV is represented as a node, and edges are formed between UAVs that are within a predefined interference distance. This model allows UAVs to identify and avoid channels that are being used by neighboring UAVs or jammed by external attackers, thereby reducing the jamming likelihood. The power layer optimizes transmit power to meet rate thresholds. Theoretical analysis confirms that MALQL can converge to a pure strategy Nash equilibrium.", + "bbox": [ + 503, + 637, + 921, + 910 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Nevertheless, there are still some issues with the anti-jamming mechanism in [87]. Considering that the rapid mo", + "bbox": [ + 503, + 914, + 921, + 946 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg", + "image_caption": [ + "Part A. 
Overall system model under jamming" + ], + "image_footnote": [], + "bbox": [ + 84, + 90, + 325, + 213 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg", + "image_caption": [ + "Part B. MALQL-based anti-jamming scheme" + ], + "image_footnote": [], + "bbox": [ + 341, + 90, + 589, + 215 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg", + "image_caption": [ + "Part C. Population update of pre-training for generalized anti-jamming scheme" + ], + "image_footnote": [], + "bbox": [ + 602, + 99, + 903, + 213 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg", + "image_caption": [ + "Part D. PER-MATD3-based anti-jamming scheme" + ], + "image_footnote": [], + "bbox": [ + 89, + 250, + 491, + 367 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg", + "image_caption": [ + "Actor-Critic network for agent k", + "Fig. 8. The overall architecture illustrates various reinforcement learning-based anti-jamming schemes designed to enhance communication reliability in UAV-assisted MEC systems under jamming attacks. Part A presents the overall system model, depicting UAVs and jammers interacting within a dynamic environment. Part B shows the MALQL-based anti-jamming scheme, where agents use layered Q-learning to determine actions based on local observations and rewards. Part C depicts the population update mechanism for pre-training a generalized anti-jamming scheme, involving a jammer population, trajectory encoder, and decoder network to optimize jamming policies. 
Part D introduces the PER-MATD3-based anti-jamming scheme, incorporating a priority experience replay buffer and actor-critic networks to dynamically allocate resources and optimize UAV deployment strategies." + ], + "image_footnote": [], + "bbox": [ + 506, + 250, + 898, + 369 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "bility of UAVs may expose them to various and unknown jamming patterns due to frequent transitions to new scenarios, the anti-jamming methods need to be generalized [176], especially in the LAENet. The work [87] randomly initializes strategies and learns from scratch for a particular deployment environment with no pretraining, which may lead to a reduction in the generalization ability of the anti-jamming strategy. In light of this, the authors in [88] introduce an adversarial pre-training stage in the proposed two-stage MARL with a decentralized partially observable Markov decision process. Specifically, the adversarial pre-training stage uses a quality-diverse jammer population (e.g., fixed, random, sweeping, statistic, and RL-based jamming) to bootstrap generalized anti-jamming strategies instead of directly initializing the agents with random anti-jamming policies, as shwon in part C of Fig. 10. This pre-training ensures that UAVs are not overfitted to specific jamming patterns and can generalize to new jamming attacks in real-world deployments. The pre-trained policies are deployed in the fine-tuning stage, where a graph convolutional-based MARL algorithm is proposed to jointly optimize channel selection and power allocation for anti-jamming similar to [87]. 
Simulation results show that the proposed solution achieves $20 - 30\\%$ higher cumulative rewards than collaborative multi-agent Q-learning [177] and independent Q-learning [83] under fixed and sweeping jamming.", + "bbox": [ + 73, + 489, + 490, + 867 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Note that previous RL-based anti-jamming strategies [87], [88] mainly rely on the Q-learning method, which is suitable for discrete action spaces but may be limited in dealing with high-dimensional continuous spaces [147], [148]. The authors in [89] propose a PER-MATD3 algorithm against jamming", + "bbox": [ + 73, + 869, + 491, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "by integrating spectrum-aware channel selection and prioritized experience replay (PER) into an MADRL framework, as shown in part D of Fig. 10. The proposed spectrum-aware intelligent channel selection uses energy detection-based spectrum sensing, enabling UAVs to identify and avoid jammed channels. The TD3 is specifically designed to handle continuous-valued states and actions, where two critic networks, target policy smoothing, and delayed policy updates are used to further stabilize DRL training. By leveraging PER, the agents can learn from high-error experiences, thereby accelerating adaptation to time-varying CSI, imperfect jamming detection, and co-channel interference. By jointly optimizing CPU frequency, bandwidth allocation, and channel selection to minimize the impact of jamming, PER-MATD3 reduces system cost (a linear combination of latency and energy consumption) by approximately $16.7\\%$ , $9.1\\%$ , and $1.2\\%$ compared to the baselines of Q-learning, MATD3-JSC (without PER), and PER-MATD3 (without channel selection), respectively.", + "bbox": [ + 501, + 489, + 921, + 762 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Lesson Learned. 
Recent advancements in anti-jamming strategies show that intelligent decision-making for trajectory control, channel selection, and power control is essential for effective jamming mitigation. A key takeaway is the successful integration of MARL to develop dynamic and adaptive anti-jamming solutions [75]. By employing intelligent algorithms such as adversarial pre-training and decentralized decision-making, UAV networks can generalize anti-jamming strategies across diverse environments [76], [77]. However, challenges persist in the generalization of these strategies across various jamming types and environmental conditions, as well as balancing the trade-offs between energy consumption,", + "bbox": [ + 503, + 763, + 921, + 945 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg", + "image_caption": [ + "Part A. ML-based spoofing detection" + ], + "image_footnote": [], + "bbox": [ + 83, + 85, + 483, + 219 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg", + "image_caption": [ + "Part B. Rule and ML-based spoofing detection", + "Fig. 9. The overall framework of ML and rule-based spoofing detection for GPS spoofing detection in the LAENet. Part A depicts an ML-based spoofing detection mechanism in [93], where multiple CNN classifiers are trained with updated sample weights to form an integrated classification model. Each CNN transfers its optimized parameters to subsequent classifiers, enhancing the model's robustness. Part B presents a hybrid rule and ML-based approach in [94], where statistical analysis of path losses between UAVs and multiple base stations (BSs) is performed by edge servers. 
The analyzed data is processed through MLPs to generate individual predictions, which are aggregated to produce a final spoofing detection decision." + ], + "image_footnote": [], + "bbox": [ + 86, + 250, + 488, + 411 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "latency, and throughput. Future research could delve into the integration of more adaptive learning frameworks (such as deep learning) into the LAENet for anti-jamming, enabling it to better manage partial or imperfect environmental observations for low-latency, real-time decision-making in multi-UAV systems.", + "bbox": [ + 73, + 571, + 490, + 662 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B. Spoofing Defense", + "text_level": 1, + "bbox": [ + 75, + 683, + 218, + 698 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the LAENet, the openness of A2G transmission channels and the dynamic nature of low-altitude aircraft networks make them particularly susceptible to identity-based spoofing attacks [50]. In such attacks, a malicious entity impersonates a legitimate transmitter using falsified identity information, such as a spoofed media access control address, to gain unauthorized access to the network [52]. Once authenticated, the spoofer can disrupt communications among aircraft by launching more severe attacks, such as rogue access point infiltration and denial-of-service attacks, ultimately leading to network congestion and service outages [75]. Given the limitations of conventional authentication methods that rely on complex cryptographic protocols [52], PLA offers a promising alternative by leveraging the inherent and unique physical characteristics of wireless transmissions for the LAENet, which is introduced in Section III-B. 
Overall, this type of PLA can defend against spoofing", + "bbox": [ + 73, + 703, + 491, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "attacks in the LAENet by exploiting the unique characteristics of the wireless channel (such as RSS, Rayleigh channel, and Rician channel) to identify and separate between legitimate devices and spoofers.", + "bbox": [ + 501, + 69, + 919, + 128 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The work in [90] proposes a PLA framework to detect spoofing attacks by exploiting spatial correlations of RSS in A2G channels. The key idea is that the RSS from a legitimate transmitter will remain relatively consistent due to its fixed location, while the RSS from a spoofer will vary significantly because of its different position and channel conditions. Thus, the UAV receiver can perform a hypothesis test to authenticate incoming signals. if the RSS distance between the current signal and a previously authenticated signal is below a predefined threshold, the signal is accepted as legitimate. Otherwise, it is flagged as a spoofing attempt. However, the work [90] is considered under an ideal transmission scenario, where the propagation environment is perfectly exempted from external interference. To address this limitation, the authors in [91] develop a PLA framework that accounts for channel randomness and interference uncertainty. First, they model the G2A link as a Rayleigh fading channel. Then, they introduce jamming signals as external interference. By modeling the jamming power statistically and incorporating it into the analysis of detected power differences, if the difference in power exceeds a predefined threshold, it is identified as a spoofing attempt. 
Thus, even in real-world scenarios with interference, the proposed framework can better differentiate between natural channel fading and anomalies caused by spoofing attacks.", + "bbox": [ + 501, + 128, + 921, + 491 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In addition to using the statistical properties of the Rayleigh channel to design PLA against spoofing in environments with multipath fading (such as urban areas), the channel characteristics in suburban environments should also be considered. To address this, the work [77] proposes a PLA framework to counter spoofing attacks in both urban (Rayleigh channel) and suburban (Rician channel) environments. As mentioned earlier (in Section III-B), a new metric AD is devised to distinguish between legitimate signals and spoofing signals based on differences in channel randomness and geographical factors, such as elevation angles and distances. Adopting the unique fading characteristics of Rayleigh and Rician channels makes it statistically difficult for a spoofer to accurately mimic a legitimate signal. By considering elevation angles and distances in channel modeling, it ensures that a spoofer cannot easily replicate a legitimate signal even if the spoofer knows the legitimate transmitter's location. 
Simulation results show that the probability of a successful spoofing attack is significantly reduced compared to the baseline [171], where the spoofing miss detection probability drops to 0.014 in suburban environments and 0.371 in dense urban areas.", + "bbox": [ + 501, + 491, + 921, + 808 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the LAENet, in addition to being vulnerable to identity-based spoofing attacks, aircrafts are also susceptible to signal spoofing attacks from the Global Navigation Satellite System (GNSS), particularly GPS spoofing, which poses a significant security threat by generating and transmitting counterfeit satellite signals resulting in severe positioning deviations [25]. By interfering with or suppressing legitimate GNSS signals, attackers can manipulate UAV locations in an imperceptible manner to mislead UAVs, causing deviations from intended", + "bbox": [ + 501, + 809, + 921, + 944 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/35f27e74f7c34c861495fa1c76195ab51abea9e536ca4fcaa9d1d7ca44689523.jpg", + "table_caption": [ + "TABLE IX SUMMARY OF SPOOFING DEFENSE FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
TechniquesReferenceAlgorithmPros & Cons
PLA[90]RSSSpatial correlations of RSS distances in PLA against spoofing attacksUse RSS-based channel characteristics to reduce PLA computational complexityAssume an ideal transmission scenario without external interference
[91]Rayleigh channelDefend against spoofing attacks by considering channel randomness and jammingSimultaneously address spoofing and jamming attacks via PLAAssume static UAVs and a known jamming distribution
[77]Rayleigh and Rician channelsAD-based PLA for spoofing defense under Rayleigh and Rician channelsProvide a thorough analysis of spoofer identification in urban and suburban environmentsAssume perfect CSI in channel modeling
GNSS spoofing detection[92]Rule-based detectionCombine cooperative localization mechanism with Stackelberg game against spoofing attacksSpoofing detection is based on neighboring UAV signal sources without predefined thresholdsExtending to larger UAV groups may require complex adjustments
[93]ML-based detectionImproved AdaBoost-CNN for multi-modal spoofing attack identificationHigh accuracy in identifying spoofing attacks with limited data samplesDependence on predefined signal features may lead to model overfitting
[94]Rule & ML-based detectionMLP and statistical feature extraction on path-loss data for detecting GPS spoofingNo additional hardware/energy burden on UAVsRobust performance under sparse base station coverageSpoofing detection performance degrades in areas with unstable cellular signals
", + "bbox": [ + 84, + 116, + 911, + 347 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "flight paths, violations of no-fly zone regulations, or increased collision risks [46]. Given a critical role of GNSS in UAV operations, effective detection and mitigation strategies for spoofing attacks are essential to ensure flight safety and prevent security breaches in the LAENet. Currently, studies on signal spoofing attack recognition in the LAENet mostly focuses on recognizing GNSS spoofing attack detection, which primarily falls into two categories with respect on rule-based and ML-based methods [19], [25]. Rule-based detection methods typically assess the relative distance and positional deviations of UAVs to determine if they are under GNSS spoofing attack. On the other hand, the ML-based methods pay attention to recognize different spoofing types by learning the characteristics of received signals.", + "bbox": [ + 73, + 373, + 491, + 585 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Generally, the simplified rule-based methods determine whether a UAV has encountered spoofing attacks based on whether its trajectory follows a predictable path [178], [179], since a UAV may exhibit deviations from this path due to the false signals imposed by the spoofer. If the measured deviations exceed predefined thresholds, it indicates a potential spoofing attack. However, relying on predefined thresholds for detecting deviations may not dynamically adapt to the spoofing attacks. The study in [92] proposes a defense mechanism based on cooperative localization, where each UAV uses the relative distances and real locations of neighboring UAVs to detect spoofing attacks. Specifically, each UAV measures its relative distances based on alternative signal sources of neighboring UAVs and compares these results with its own GPS-derived location. 
If inconsistencies are detected (e.g., the GPS-derived location does not match the majority of the calculated locations), the UAV identifies itself or a neighboring UAV as being under attack. To optimize defense deployment, an equilibrium of a dynamic Stackelberg game is derived between the drone operator (leader) and the spoofer (follower). Simulation results show that the proposed scheme can effectively prevent spoofer's capture, while random/deterministic baselines suffer from attackers capturing one to two UAVs.", + "bbox": [ + 73, + 597, + 491, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Recent ML-based methods for spoofing defense primarily focus on binary classification concerning normal GPS signals and spoofing signals [180], [181]. However, they fail to recognize specific types of spoofing attack necessary for countermeasures in complex environments. Hence, there is an urgent need to recognize diverse GPS spoofing attack patterns for effective countermeasures for the LAENet. The authors in [93] propose an improved AdaBoost-CNN algorithm to address the challenge of recognizing diverse GPS spoofing attack patterns for UAVs, as shown in part A of Fig. 9. Three categorized spoofing attack patterns are considered including static and dynamic spoofing based on the UAV's motion state, power-matched and overpowered spoofing based on signal power, and position and time spoofing based on the spoofing targets. The authors select key GPS spoofing signal features such as signal quality monitoring, carrier-to-noise ratio, Doppler shift, and clock error to train the classification model. The improved AdaBoost-CNN algorithm integrates multiple weak CNN classifiers into a strong classification model. Each CNN base classifier uses the updated network parameters from the previous CNN as initial values, enabling iterative refinement of network weights to enhance feature extraction and generalization. 
With 800 simulated samples, the improved AdaBoost-CNN achieves $100\\%$ accuracy, outperforming original AdaBoost-CNN $(94.38\\%)$ , CNN $(74.38\\%)$ , DNN $(60.94\\%)$ , SVM $(40.63\\%)$ , and KNN $(53.13\\%)$ .", + "bbox": [ + 501, + 373, + 921, + 765 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Furthermore, integrating rule-based approaches with machine learning-based methods provides an effective and robust defense against spoofing attacks. The work in [94] leverages statistical features of path losses between UAVs and terrestrial base stations to detect a UAV's trajectory deviation due to GPS spoofing, as shown in part B of Fig. 9. The spoofing detection is formulated as a nonlinear optimization problem that aims to minimize hypothesis test errors by adjusting thresholds, statistical feature weights, and the number of base stations. To further accurately analyze path loss's statistical features for final decisions on predicting GPS spoofing probabilities,", + "bbox": [ + 503, + 777, + 921, + 946 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/b18000f7aeb2c577b647d747d82e6d530210f92f8540c1ae3fe4804b6814cb6a.jpg", + "table_caption": [ + "TABLE X SUMMARY OF ANOMALY DETECTION FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
Anomaly typeReferenceAlgorithmPros & Cons
Jamming[98]HDBNSA module based on HDBN for detecting jamming anomalies\n✓ UAccurately characterize and detect jamming anomalies via KLD/DB metrics\n✗ Unstable initialization in unsupervised learning affects the performance of the HDBN
[99]GDBNGDBN to model the radio environment and detect and classify jamming anomalies\n✓ Unsupervised learning eliminates dependency on labeled data in classification of anomalies\n✗ Computational complexity increases with the number of jamming categories
[100]Active-GDBNActive-GDBN used to model UAV-jammer interactions for anomaly detection\n✓ Actively incorporate UAV's actions for faster adaptation and jamming detection\n✗ M-MJPF requires significant computational resources
[101]Blind channel estimation & ACSBlind channel estimation based on ACS properties to detect jammer signals\n✓ Does not rely on prior knowledge of the jammer's behavior\n✗ Assumes a specific structure of the multicarrier modulation format
Abnormal Power[102]Spectrum surveillanceLocal and cooperative detection of abnormal power emission\n✓ Handle both aggressive and passive power misuse\n✓ Cloud-based framework enables real-time closed-loop surveillance\n✗ Computational complexity increases with the number of SN
Eavesdropping[103]SVM & K-meansOne-class SVM and K-means clustering for detecting eavesdropping anomalies\n✓ One-class SVM and K-means are stable under varying eavesdropper power\n✗ Detection performance mainly depends on the quality and quantity of the ATD
", + "bbox": [ + 99, + 116, + 897, + 347 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "multilayer perceptron (MLP) neural networks are deployed on edge cloud servers, where individual MLP models at each BS are used to analyze statistical features of path losses. Simulation results show that the proposed method achieves $97\\%$ accuracy with two base stations and $83\\%$ accuracy with a single base station, outperforming baseline approaches such as adaptive trustable residence area (ATRA), which necessitates three base stations for triangulation [182].", + "bbox": [ + 73, + 373, + 491, + 494 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Lesson Learned. For identity spoofing in the LAENet, leveraging signal features such as received signal strength and channel randomness in PLA design is an effective approach [77], [90], [91]. On the other hand, employing rule-based or ML-based techniques can detect and mitigate GNSS signal spoofing [92]–[94]. While ML-based methods show promising performance, they are limited by factors such as computational complexity and dependency on large datasets. Rule-based methods are simpler but may struggle in dynamic or uncertain environments. Future research could explore the application of RL to develop adaptive and robust spoofing defense mechanisms in the LAENet, which has not yet been extensively studied. Different from the abovementioned approaches, RL dynamically learns from interactions with the environment, and its sequential decision-making ability enables UAVs and ground stations to optimize spoofing defense strategies based on continuous feedback [147], make it a promising direction for enhancing spoofing defense in the LAENet", + "bbox": [ + 73, + 494, + 491, + 767 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "V. COMMUNICATION INTEGRITY FOR LAENET", + "text_level": 1, + "bbox": [ + 114, + 784, + 450, + 799 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "A. 
Anomaly Detection", + "text_level": 1, + "bbox": [ + 73, + 804, + 232, + 819 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Due to the open nature of wireless channels and the dominant LoS links in the LAENet, communication becomes particularly vulnerable to a diverse range of anomalous behaviors such as abnormal jamming, abnormal transmission power, and covert eavesdropping [46], [49]. Specifically, malicious jammers sense spectrum activity and dynamically adapt their interference patterns to mislead the UAV into taking suboptimal or harmful actions [81], [95]. In parallel, abnormal", + "bbox": [ + 73, + 823, + 491, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "power emissions, either due to device faults, selfish behavior, or malicious intent, can violate spectrum policies, introduce harmful interference, and disrupt cooperative spectrum sharing [96]. Additionally, the pervasive risk of eavesdropping is that adversaries exploit the UAV's uplink or downlink transmissions to intercept sensitive data [61], [67]. Thus, it is essential to detect and mitigate these abnormal activities in the LAENet. Different from previously reviewed approaches such as anti-eavesdropping (Section III-A) and anti-jamming (Section IV-A), anomaly detection is a method used to identify and mitigate unexpected deviations from or irregularities in normal operational patterns by monitoring communication channels in the LAENet [127], [183].", + "bbox": [ + 501, + 373, + 921, + 570 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Jamming anomalies generally aim to disrupt the normal operation of UAV communication links, such as by injecting disruptive signals to interfere with the legitimate communication process. The study in [98] proposes a novel Self-Awareness (SA) module to leverage the radio to detect abnormal behaviors caused by jamming attacks for Cognitive UAV communications. 
The SA module unsupervisedly learns a generative model using a Hierarchical Dynamic Bayesian Network (HDBN) [184] to represent the joint distribution of random variables characterizing the radio environment at different levels of abstraction and across time, where the Modified Bayesian Filtering [185] is used to integrate multilevel abnormality measurements for online predictions of radio environmental states at different levels. Since jamming can disrupt and shift the distributions of the radio environment, the abnormalities can be detected by calculating the Kullback-Leibler Divergence (KLD) and Dhattacharyya distance (DB) [186] between predictive messages and diagnostic messages. The predictive messages are generated by the HDBN to capture the expected patterns of normal signals, and diagnostic messages reflect the actual state of the signal. The jammer's impact is characterized by calculating generalized errors based on shifts in amplitude, phase, and frequency of signals, allowing the radio to predict future activities of the jammer. The SA", + "bbox": [ + 503, + 582, + 921, + 946 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 903, + 30, + 919, + 40 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg", + "image_caption": [ + "Part A. HDBN-based Scheme" + ], + "image_footnote": [], + "bbox": [ + 83, + 90, + 506, + 210 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg", + "image_caption": [ + "Part B. GDBN-based Scheme" + ], + "image_footnote": [], + "bbox": [ + 86, + 234, + 504, + 295 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg", + "image_caption": [ + "Part C. Active-GDBN-based Scheme", + "Fig. 10. 
The overall architecture illustrates jamming anomaly detection to enhance communication integrity in the LAEnet. Part A presents an HDBN-based scheme focusing on hierarchical dynamic models to predict and detect abnormal signals caused by jammers. It details the transition probabilities between model states and the prediction of continuous states based on discrete superstates. Part B introduces a GDBN-based scheme, extending the HDBN approach by incorporating generalized states and observations, allowing for more nuanced predictions and error calculations. Part C depicts an Active-GDBN-based scheme, integrating UAV actions into the model to actively infer and adapt to the environment, thereby optimizing resource allocation and anti-jamming measures." + ], + "image_footnote": [], + "bbox": [ + 526, + 93, + 906, + 297 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "module achieves a near $100\\%$ abnormality detection accuracy, approximately $12\\%$ higher than the traditional energy detector-based scheme.", + "bbox": [ + 73, + 411, + 491, + 455 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Different from the previous work [98], which introduced the SA module using HDBN for anomaly detection, the authors in [99] propose a Generalized Dynamic Bayesian Network (GDBN)-based framework to enhance the SA module by further classifying the detected anomalies caused by multiple jammers. A generalized state-space model [184] is used to represent the evolving radio environment as a GDBN model learned in an unsupervised manner. Different from the KLD/DB metric in [1], Kullback-Leibler divergence and Bhattacharyya distance are used as abnormality measurements between predicted and observed signals to detect jamming. Once an abnormality indicative of jamming is detected, the UAV extracts the interfering signal and compares it with prelearned GDBN models (each representing a different jamming modulation scheme). 
By evaluating which pre-learned model best explains the extracted jamming signal, the UAV can not only detect the presence of a jammer but also classify its modulation type. Simulation results show that the GDBN-based method achieves an overall classification accuracy of $98\\%$ at $\\mathrm{SNR} = 10$ dB, outperforming LSTM $(88\\%)$ , CNN $(67\\%)$ , and SAE $(90\\%)$ .", + "bbox": [ + 75, + 458, + 491, + 775 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Based on the study [99], the authors in [100] propose an Active-GDBN to model the dynamic interaction between the UAV and jammer for anomaly detection. Similar to [99], the generalized state-space model [184] is used to capture the features and dynamic evolution of UAV signals to represent the radio environment. Differently from passive detection and classification of jamming signals in [99], the Active-GDBN achieves active anomaly detection by incorporating the UAV's actions into the inference process. Specifically, the UAV employs a Modified Markov Jump Particle Filter (M-MJPF) [187] to link the UAV's actions to environmental", + "bbox": [ + 73, + 777, + 491, + 945 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "states and observations. Meanwhile, the UAV dynamically adjusts physical resource block selections to evade jamming by encoding jammer behavior and updating beliefs. The Active-GDBN achieves about $25\\%$ to $37.5\\%$ faster convergence on anomaly detection probability than the Q-learning-based baseline under various jamming types.", + "bbox": [ + 501, + 410, + 919, + 502 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Different from previous works [98]–[100] that detect jamming anomalies based on the statistical distribution divergence of the signal, study [101] focuses on detecting anomalies by exploiting the time delays, shifts, and modulation of the signal characteristics. 
Firstly, achieving blind channel estimation involves constructing cyclic correlation matrices to identify distinct Doppler shifts and time delays associated with transmissions by exploiting the inherent almost-cyclostationary (ACS) properties of UAV and jammer signals (e.g., periodic statistics from OFDM modulation). Then, this blind estimation process is combined with a widely linear minimum mean square error (WL-MMSE) filter to provide an initial estimate of the symbol vector by leveraging the non-circular statistics of the received signal, where the initial estimate includes contributions from both the UAV and the jammer. Finally, a post-sorting algorithm (PSA) is employed to iteratively decode and cancel the jammer's contribution by ranking and removing symbols with the highest signal-to-disturbance-plus-noise ratio (SDNR). Simulation results demonstrate that the proposed scheme can effectively detect and separate the jamming signals from UAV signals without requiring prior knowledge of the jammer's characteristics, even when the jammer's power is twice as strong as the UAV's power.", + "bbox": [ + 501, + 503, + 921, + 851 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In addition to jamming anomalies, which cause interference and security threats in the LAENet, abnormal power emissions in UAV communication networks also represent a critical type of anomaly, potentially leading to severe disruption of communication quality and violation of spectrum policies. 
The work in [102] proposes a cloud-based surveillance framework", + "bbox": [ + 501, + 854, + 921, + 944 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/aefb9e00909efa991b8f89e18e40ebc9e14f812ef0f371b6668cba7d6f813151.jpg", + "table_caption": [ + "TABLE XI SUMMARY OF INJECTION DEFENSE FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + ], + "table_footnote": [], + "table_body": "
Injection typeReferenceAlgorithmPros & Cons
Jamming signal[98]HDBNHDBN-based jamming signal extraction and suppression\n✓ Autonomous learning from raw I/Q data enables adaptability to dynamic jamming patterns\nX Assume the jammer's output power remains constant during attacks
[101]SICSIC with blind channel estimation for detecting and eliminating jamming signals\n✓ Eliminate jamming signals regardless of the mobility patterns of jammers\nX Rely on sufficient cyclostationary features in the received signal
[104]DBFDBF algorithm for nullifying jamming signals\n✓ Effective suppression of jamming signals while maintaining carrier phase integrity\nX May be limited to specific GNSS frequency bands
Spoofing signal[105], [106]API & LSRSIC combined with API and LSR to recover legitimate signals from spoofing attacks\n✓ SemperFi with a single antenna does not require additional hardware\nX Limited to attackers with a power advantage lower than 15 dB
[107]Subspace projectionSubspace projection for nullifying spoofing signals\n✓ Low parameter dependency, requiring only code delays and carrier frequencies\nX Suppression performance declines if spoofing and legitimate signals have similar code delays
", + "bbox": [ + 96, + 116, + 901, + 301 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "to address the detection of abnormal power emissions, where the cloud server assigns spectrum resources to the UAVs and shares UAVs' spectrum usage information with the surveillance center. The surveillance center assigns the detection task to $K$ surveillance nodes (SNs) for local detection of abnormal power emission, where the detection rule is based on the Lagrange multiplier method and the generalized likelihood ratio test. After local decisions, $K$ SNs report results to the surveillance center, where cooperative detection of abnormal power emission is performed using the decision rule that declares an abnormal event when at least $L$ out of $K$ nodes detect an abnormality, where the optimal global threshold of $L$ is determined by solving the constraints on the global false-alarm probabilities. Simulation results show that the global detection probability exceeds 90% when transmit power deviation exceeds 0.02W (allocated power is 0.01W).", + "bbox": [ + 73, + 328, + 491, + 570 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Besides the threats of jamming and abnormal power emission, another critical anomaly that requires detection is eavesdropping in the LAENet, where malicious devices covertly intercept sensitive information during UAV-to-ground and UAV-to-UAV transmissions [66], [67]. Note that most previous works on anti-eavesdropping focused on measuring secure performance through secrecy rate and/or secrecy outage probability (such as [71], [76]) rather than emphasizing the detection of eavesdropping attacks. The work in [103] explores anomaly detection for eavesdropping attacks in UAV-aided wireless systems using unsupervised learning. 
Two datasets are prepared: artificial training data (ATD), simulated without eavesdropping based on CSI (all labeled normal), and a practical dataset extracted from received signal features (mean and variance of amplitude). Two types of unsupervised learning methods are designed for anomaly detection. One-class SVM maps data to a high-dimensional space, defining a normal region where outliers are detected. K-means clustering classifies test data into two clusters, labeling the one nearest to the ATD center as normal.", + "bbox": [ + 73, + 575, + 491, + 878 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Lesson Learned For jamming anomalies, the statistical distribution divergence detection and signal structural feature-based detection, such as HDBN, GDBN, and ACS, are used to model the dynamic environment and detect deviations from", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "learned normal patterns. For abnormal transmission power detection, a cloud-based surveillance framework supports a statistical distribution detection approach to monitor and identify power emission outliers. Leveraging its high computing power, the cloud enables cooperative analysis through multi-source data aggregation, dynamically optimizes detection thresholds using global information, and maintains a feedback loop for adaptive anomaly detection. For eavesdropping detection, unsupervised learning techniques, including One-Class SVM and K-means clustering, achieve the identification of anomalies in received signals. These approaches effectively achieve anomaly detection and demonstrate excellent performance. However, challenges remain, including the reliance on high-quality training data and the complexity of maintaining real-time adaptability in dynamic spectrum environments. 
Currently, Generative AI such as GANs and generative diffusion models presents a promising research direction for anomaly detection, as demonstrated in the use of generalized models in HDBN and the artificial data generation for training ML and clustering models in [188], [189]. Generative AI could further enrich training datasets and provide a high-level generative model to enhance anomaly detection in the dynamic and uncertain LAENet.", + "bbox": [ + 501, + 328, + 921, + 675 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "B. Injection Defense", + "text_level": 1, + "bbox": [ + 504, + 709, + 650, + 724 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The low-altitude economy is highly dependent on open communication and network architecture with dense communication links, which brings injection attacks as a significant threat to UAV communication integrity [28], [46]. These attacks involve the deliberate injection of malicious signals, such as jamming and spoofing signals, to disrupt or manipulate legitimate communications [97], [190]. Jamming signal injection can make legitimate signals unrecognizable by emitting high-power electromagnetic interference to degrade signal reception [98]. Additionally, spoofing signal injection can transmit high-power signals to overshadow legitimate GNSS signals. Therefore, eliminating injection signals or separating them from legitimate signals is crucial for ensuring communication integrity in the LAENet.", + "bbox": [ + 501, + 732, + 921, + 944 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 905, + 30, + 919, + 40 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg", + "image_caption": [ + "Part A. 
SNDR-based SIC for jamming injection defense" + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 464, + 348 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg", + "image_caption": [ + "Part B. SIC with API and LSR for spoofing injection defense" + ], + "image_footnote": [], + "bbox": [ + 493, + 97, + 911, + 236 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg", + "image_caption": [ + "Part C. Subspace projection for spoofing injection defense", + "Fig. 11. The overall architecture of injection defense mechanisms for UAVs in smart city applications. Part A presents the SIC architecture that processes channel state information to defend against jamming injection attacks [101]. Part B shows an SIC architecture integrated with API and LSR modules, which subtracts injection signals from the received signal to recover normal signals [105], [106]. Part C depicts a subspace projection-based architecture for spoofing injection defense, where the received signal is projected onto the orthogonal null space of the spoofing signals to eliminate them [107]." + ], + "image_footnote": [], + "bbox": [ + 495, + 276, + 903, + 339 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "The UAV's communication can be severely disrupted by jammers that exploit LoS propagation to inject jamming signals into the transmission channel, which may effectively mask legitimate signals and render them unrecognizable [101]. The work in [98] proposes an HDBN-based injection defense scheme to extract and remove the jammer's signal. This work first utilizes the HDBN to detect abnormal behaviors caused by jamming attacks, as mentioned earlier in Section V-A. 
Once the jammer's presence is confirmed, its signal characteristics are analyzed across multiple levels of continuous in-phase (I) and quadrature (Q) components and observation-level state vectors [191]. The extracted jammer signal is then separated from the received observation using frequency-domain subtraction [192], component-wise I/Q processing, and adaptive filtering [191]. The corrected signal is subsequently demodulated and decoded using techniques and error correction coding to restore the original signal. To maintain resilience against evolving jamming tactics, the system continuously updates the HDBN model to improve suppression commands. Simulation results show that the mean square error (MSE) of suppression commands decreases as the JSR increases, meaning that jammers attacking with higher power can be better estimated than jammers attacking with lower power.", + "bbox": [ + 73, + 435, + 491, + 785 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Different from the work in [98], which separates the jamming signal by analyzing its I/Q characteristics, the study in [101] proposes a Serial Interference Cancellation (SIC) scheme based on SDNR to eliminate injected anomalous signals in UAV communications, as shown in part A of Fig. 11. First, blind channel estimation and a WL-MMSE filter are used to identify UAV and jammer signals (as detailed in Section V-A). Then, the PSA ranks detected symbols based on SDNR, where the jamming signals rank higher in SDNR due to their higher emitted power. The SIC [193],", + "bbox": [ + 73, + 792, + 490, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "[194] is subsequently designed for progressively eliminating jamming signals. Specifically, the high-rank jamming symbol is decoded, reconstructed using estimated channel parameters, and subtracted from the received signal. 
The process continues iteratively to eliminate previously detected jamming signals until all UAV symbols are successfully recovered, with the receiver dynamically updating channel estimation to adapt to jammer mobility and environmental changes. Simulation results demonstrate that the UAV signal can be recovered with low bit error rates $(< 10^{-4})$ even when the power of the jammer is double that of the UAV.", + "bbox": [ + 501, + 435, + 921, + 602 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Jamming attacks not only affect U2G and UAV-to-UAV communications but also cause RF interference, leading to UAVs failing to track GNSS signals in the LAENet. In light of this, the work in [104] proposes a self-calibrating digital beamforming (DBF) algorithm to effectively nullify jamming signals while preserving high-precision carrier phase measurements. It calibrates the antenna array's steering vectors and RF channel characteristics. Once calibration is complete, the system performs jamming detection and direction estimation by analyzing interference patterns across the antenna array. Then, the minimum power distortionless response (MPDR) optimization rule is used to calculate optimal beamforming weights, which aim to create nulls in the beam pattern corresponding to the directions of jamming signals, effectively suppressing them. The calculated beamforming weights are applied to the received signals to produce the beamformer output, which effectively suppresses jamming signals while preserving the carrier phase integrity of the desired signals. 
The proposed scheme achieves up to 80 dB Jammer-to-Signal Ratio (JSR) suppression, significantly outperforming the conventional Power Inversion (PI) scheme.", + "bbox": [ + 501, + 606, + 921, + 924 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In addition to jamming signals, spoofing attacks can easily", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "transmit fake signals to manipulate GNSS signals due to their open signal structure and weak signal strength [195]. One type of method is based on signal encryption or data encryption to prevent malicious spoofers from injecting illegitimate signals [196]–[198]. However, they may not be suitable for resource-constrained UAVs in the LAENet. Therefore, defending against spoofing signal injection based on different signal characteristics is a promising solution. The authors in [105], [106] propose an anti-spoofing system, called SemperFi, to autonomously recover legitimate signals during active spoofing for UAVs. The system employs two core modules: the Adversarial Peak Identifier (API) and the Legitimate Signal Retriever (LSR), as shown in part B of Fig. 11. The API detects spoofed signals by correlating inertial measurement unit (IMU) data with calculated position-velocity-time (PVT) solutions [199]. The LSR module replicates the spoofing signal once it is identified. Then, similar to the study in [101], the SIC is applied to subtract the replica from the composite received signal that contains legitimate and spoofing signals. 
SemperFi enters an iterative refinement process if spoofing signals persist after initial cancellation, where replication, subtraction, and reassessment are performed until the spoofing detector no longer triggers an alarm, indicating sufficient attenuation or elimination of spoofing.", + "bbox": [ + 78, + 69, + 488, + 430 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Besides recovering legitimate signals by subtracting spoofing signals from the received signal [101], [105], [106], projecting the signal is also a viable injection defense strategy. In the study [107], the GNSS receiver's spoofing mitigation algorithm employs a subspace projection-based interference cancellation method to effectively eliminate spoofing signals, as shown in part C of Fig. 11. Specifically, the receiver on UAVs acquires and tracks incoming signals, identifying potential spoofing signals and reconstructing them based on their power levels, pseudo-random noise (PRN) code delays, and carrier frequencies. Then, the receiver uses these reconstructed spoofing signals to construct a spoofing subspace, which represents all possible linear combinations of spoofing signal characteristics. To effectively remove spoofing signals from the received signal, the receiver performs orthogonal projection to obtain a cleaned signal by mapping the received signal onto a complementary null space that is mathematically orthogonal to the spoofing subspace. Simulation results show that shorter projection lengths suppress spoofing signals more effectively than longer projections, achieving a $20\\mathrm{dB}$ gain in Signal-to-Interference Ratio (SIR).", + "bbox": [ + 78, + 431, + 488, + 746 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Lesson Learned The above-mentioned studies have demonstrated the effectiveness for mitigating injection signals, such as jamming and spoofing attacks, thereby enhancing UAV communication reliability and security. 
These advancements leverage techniques that not only detect malicious signal interference but also enable autonomous recovery. One key advantage is that non-cooperative detection techniques, such as blind estimation [101] and self-awareness models [98], allow for efficient attack identification without requiring prior knowledge of the attacker's signal characteristics to adapt to dynamic and adversarial environments. However, several challenges remain in that beamforming-based or spatial filtering techniques rely on multi-antenna configurations [101],", + "bbox": [ + 78, + 748, + 488, + 943 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "[104], limiting their applicability in cost-sensitive or small UAV systems. Future work should explore lightweight and energy-efficient implementations of injection defense to support stable UAV signal integrity protection. Additionally, more intelligent injection defense strategies combining optimization methods, RL, and ML could enhance resilience against more sophisticated adversaries.", + "bbox": [ + 508, + 69, + 919, + 172 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "VI. FUTURE RESEARCH DIRECTIONS", + "text_level": 1, + "bbox": [ + 583, + 193, + 843, + 205 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "A. Energy-efficient Physical Layer Security", + "text_level": 1, + "bbox": [ + 508, + 213, + 799, + 226 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Future work can focus on exploring more unique physical characteristics of wireless communication, such as exploiting channel characteristics and implementing simple coding schemes, to develop secure and low-energy protocols. Meanwhile, drones in the LAENet need to develop adaptive power control strategies that dynamically adjust transmission power based on channel conditions and security requirements to minimize unnecessary energy consumption [200]. 
Moreover, dynamic trajectory optimization is equally important for energy efficiency [201]. Future research can explore enabling UAVs to learn attack patterns in real time, share secure trajectory models across swarms, and dynamically adjust flight paths based on real-time security and power consumption feedback.", + "bbox": [ + 508, + 232, + 919, + 439 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "B. Multi-drone Collaboration for Secure Communication", + "text_level": 1, + "bbox": [ + 508, + 463, + 890, + 477 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Future research on secure physical layer communication in the LAENet should move beyond existing dual-UAV collaboration models and explore distributed multi-UAV coordination (or UAV swarms) for enhanced resilience against jamming, spoofing, and unauthorized access [202]. For example, UAV swarms can collaboratively emit interference signals to obscure unauthorized receivers, thereby enhancing the confidentiality of communications [20]. Additionally, the integration of adaptive trust-based mutual authentication protocols among UAVs is essential [26]. Multiple UAVs with mutually verified identities can enable dynamic and secure spectrum-sharing mechanisms to optimize resource utilization in the LAENet.", + "bbox": [ + 508, + 483, + 919, + 661 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "C. AI-driven Security Defense Strategy", + "text_level": 1, + "bbox": [ + 508, + 684, + 769, + 698 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Existing AI-based security strategies mainly focus on training AI models to identify anomalous signals while having some limitations. The resource-constrained drones are unable to train high-quality AI models, making the integration of edge computing a promising approach for model training [200]. 
Note that AI models may be difficult to generalize in recognizing various anomalous signals because they are pre-trained on previously collected datasets of fixed size. Future work can explore leveraging GAN or diffusion models to generate datasets based on real-time captured anomalous signals [203]. Furthermore, emerging generative AI technologies, such as the diffusion model for secure network topology generation in low-altitude domains [189], [204], AI agents for human-aerial vehicle secure interaction [205], and mixture of experts for robust wireless communications [2], [206], can be explored to achieve a more autonomous and intelligent LAENet.", + "bbox": [ + 508, + 704, + 919, + 943 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 906, + 31, + 919, + 39 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "D. Space-Air-Ground Integrated Security Architecture", + "text_level": 1, + "bbox": [ + 75, + 69, + 442, + 84 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Future research can explore establishing a multi-domain physical layer security framework for LAENet to connect space, air, and ground layers, providing seamless communication coverage and cost-effective network access [55], [207]. A potential key research direction is the development of a coordinated multi-tier security mechanism, where satellites, UAVs, and terrestrial base stations collaboratively enhance physical layer security through dynamic resource allocation and interference management based on real-time CSI and environmental conditions, such as UAV mobility, channel fading, and spectrum constraints.", + "bbox": [ + 73, + 88, + 491, + 255 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E. 6G-Enabled Secure UAV Communication", + "text_level": 1, + "bbox": [ + 75, + 275, + 377, + 289 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "The advent of 6G networks presents new opportunities for LAENet. 
Terahertz (THz) communication can offer ultrahigh-speed data transmission capabilities for LAENet [208]. Future research can explore the integration of THz with advanced beamforming techniques to focus signals on legitimate users, thereby enhancing security and reducing the risk of interception. Furthermore, Reconfigurable Intelligent Surfaces (RIS) play a crucial role in strengthening physical layer security by intelligently controlling wireless signal propagation [209], [210]. Future work can investigate RIS-based secure beamforming strategies to mitigate adversary interception, and leverage optimization techniques and DRL to adaptively adjust beamforming against eavesdropping or jamming attacks.", + "bbox": [ + 73, + 295, + 491, + 492 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "VII. CONCLUSION", + "text_level": 1, + "bbox": [ + 215, + 508, + 351, + 523 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "This paper has presented a comprehensive survey on secure physical layer communications in the LAENet, emphasizing the importance of safeguarding confidentiality, availability, and integrity in communications. It introduced the concept and architecture of the LAENet and outlined the associated security issues in physical layer communication. Then, the survey provided in-depth reviews of countermeasures for anti-eavesdropping strategies, authentication schemes, anti-jamming strategies, spoofing defenses, anomaly detection, and injection defense. Finally, the paper proposed a set of forward-looking future research directions. 
These discussions highlighted the critical role of secure physical layer communication in supporting the development of the LAENet and offered valuable insights for ongoing advancements in this emerging domain.", + "bbox": [ + 73, + 529, + 493, + 755 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 235, + 773, + 331, + 787 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Z. Li, Z. Gao, K. Wang, Y. Mei, C. Zhu, L. Chen, X. Wu, and D. Niyato, \"Unauthorized uav countermeasure for low-altitude economy: Joint communications and jamming based on mimo cellular systems,\" IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6659-6672, 2025.", + "[2] C. Zhao, J. Wang, R. Zhang, D. Niyato, G. Sun, H. Du, D. I. Kim, and A. Jamalipour, \"Generative ai-enabled wireless communications for robust low-altitude economy networking,\" arXiv preprint arXiv:2502.18118, 2025.", + "[3] H. A. H. Alobaidy, R. Nordin, M. J. Singh, N. F. Abdullah, A. Haniz, K. Ishizu, T. Matsumura, F. Kojima, and N. Ramli, \"Low-altitude-platform-based airborne IoT network (lap-ain) for water quality monitoring in harsh tropical environment,\" IEEE Internet of Things Journal, vol. 9, no. 20, pp. 20034-20054, 2022." + ], + "bbox": [ + 88, + 794, + 491, + 944 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[4] China holds central economic work conference to plan for 2024. Accessed: Dec. 12, 2023. [Online]. Available: https://english.www.gov.cn/news/202312/12/content_WS657860aecd0868f4e8e21c2.html", + "[5] J. Qiu, D. Grace, G. Ding, M. D. Zakaria, and Q. Wu, \"Air-ground heterogeneous networks for 5g and beyond via integrating high and low altitude platforms,\" IEEE Wireless Communications, vol. 26, no. 6, pp. 140-148, 2019.", + "[6] H. Ahmadinejad and A. 
Falahati, \"Forming a two-tier heterogeneous air-network via combination of high and low altitude platforms,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 2, pp. 1989-2001, 2022.", + "[7] N. Hossein Motlagh, T. Taleb, and O. Arouk, \"Low-altitude unmanned aerial vehicles-based internet of things services: Comprehensive survey and future perspectives,\" IEEE Internet of Things Journal, vol. 3, no. 6, pp. 899-922, 2016.", + "[8] H. Yang, M. Zheng, Z. Shao, Y. Jiang, and Z. Xiong, \"Intelligent computation offloading and trajectory planning for 3d target search in low-altitude economy scenarios,\" IEEE Wireless Communications Letters, pp. 1-1, 2025.", + "[9] R. Shakeri, M. A. Al-Garadi, A. Badawy, A. Mohamed, T. Khattab, A. K. Al-Ali, K. A. Harras, and M. Guizani, \"Design challenges of multi-uav systems in cyber-physical applications: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3340-3385, 2019.", + "[10] Y. Zhang, X. Gao, N. Ye, D. Niyato, Z. Han, and K. Yang, \"Joint uav deployment, power allocation, and coalition formation for physical layer security in heterogeneous networks,\" IEEE Transactions on Vehicular Technology, pp. 1-15, 2025.", + "[11] Z. Liu, Y. Cao, P. Gao, X. Hua, D. Zhang, and T. Jiang, \"Multi-uav network assisted intelligent edge computing: Challenges and opportunities,\" China Communications, vol. 19, no. 3, pp. 258-278, 2022.", + "[12] Y. Liu, X. Gong, J. Chen, S. Chen, and Y. Yang, \"Rotation-invariant siamese network for low-altitude remote-sensing image registration,\" IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 13, pp. 5746-5758, 2020.", + "[13] G. Cheng, X. Song, Z. Lyu, and J. Xu, \"Networked isac for low-altitude economy: Coordinated transmit beamforming and UAV trajectory design,\" IEEE Transactions on Communications, pp. 1-1, 2025.", + "[14] G. Cheng, X. Song, Z. Lyu, and J. 
Xu, “Networked isac for low-altitude economy: Transmit beamforming and uav trajectory design,” in 2024 IEEE/CIC International Conference on Communications in China (ICCC), 2024, pp. 78-83.", + "[15] X. Zheng, G. Sun, J. Li, J. Wang, Q. Wu, D. Niyato, and A. Jamalipour, \"Uav swarm-enabled collaborative post-disaster communications in low altitude economy via a two-stage optimization approach,\" arXiv preprint arXiv:2501.05742, 2025.", + "[16] China's low-altitude economy soars at high speed. Accessed: Dec. 19, 2024. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html", + "[17] China's low-altitude economy takes flight: A new engine for innovation-driven growth. Accessed: Mar. 17, 2025. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html", + "[18] Flying air taxis move closer to us takeoff with issuing of FAA rule. Accessed: Oct. 22, 2024. [Online]. Available: https://www.usnews.com/news/business/articles/2024-10-22/flying-air-taxis-move-closer-to-us-takeoff-with-issuing-of-faa-rule", + "[19] A. Rugo, C. A. Ardagna, and N. E. Ioini, “A security review in the uavnet era: Threats, countermeasures, and gap analysis,” ACM Comput. Surv., vol. 55, no. 1, Jan. 2022. [Online]. Available: https://doi.org/10.1145/3485272", + "[20] X. Wang, Z. Zhao, L. Yi, Z. Ning, L. Guo, F. R. Yu, and S. Guo, \"A survey on security of uav swarm networks: Attacks and countermeasures,\" ACM Comput. Surv., vol. 57, no. 3, Nov. 2024. [Online]. Available: https://doi.org/10.1145/3703625", + "[21] O. Ceviz, S. Sen, and P. Sadioglu, “A survey of security in uavs and fanets: issues, threats, analysis of attacks, and solutions,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024.", + "[22] H. J. Hadi, Y. Cao, K. U. Nisa, A. M. Jamil, and Q. Ni, \"A comprehensive survey on security, privacy issues and emerging defence technologies for uavs,\" Journal of Network and Computer Applications, vol. 213, p. 
103607, 2023. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1084804523000267", + "[23] V. Hassija, V. Chamola, A. Agrawal, A. Goyal, N. C. Luong, D. Niyato, F. R. Yu, and M. Guizani, \"Fast, reliable, and secure drone communication: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 4, pp. 2802-2832, 2021." + ], + "bbox": [ + 513, + 70, + 921, + 944 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 905, + 31, + 919, + 40 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] B. Zolfaghari, M. Abbasmollaei, F. Hajizadeh, N. Yanai, and K. Bibak, \"Secure uav (drone) and the great promise of ai,\" ACM Comput. Surv., vol. 56, no. 11, Jul. 2024. [Online]. Available: https://doi.org/10.1145/3673225", + "[25] X. Wei, J. Ma, and C. Sun, “A survey on security of unmanned aerial vehicle systems: Attacks and countermeasures,” IEEE Internet of Things Journal, vol. 11, no. 21, pp. 34826-34847, 2024.", + "[26] M. Adil, M. A. Jan, Y. Liu, H. Abulkasim, A. Farouk, and H. Song, \"A systematic survey: Security threats to UAV-aided IoT applications, taxonomy, current challenges and requirements with future research directions,\" IEEE Transactions on Intelligent Transportation Systems, vol. 24, no. 2, pp. 1437-1455, 2023.", + "[27] N. Kumar and A. Chaudhary, \"Surveying cybersecurity vulnerabilities and countermeasures for enhancing uav security,\" Computer Networks, vol. 252, p. 110695, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128624005279", + "[28] J. Wang, X. Wang, R. Gao, C. Lei, W. Feng, N. Ge, S. Jin, and T. Q. S. Quek, “Physical layer security for uav communications: A comprehensive survey,” China Communications, vol. 19, no. 9, pp. 77–115, 2022.", + "[29] A. Fotouhi, H. Qiang, M. Ding, M. Hassan, L. G. Giordano, A. Garcia-Rodriguez, and J. 
Yuan, \"Survey on uav cellular communications: Practical aspects, standardization advancements, regulation, and security challenges,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3417-3442, 2019.", + "[30] M. Adil, H. Song, S. Mastorakis, H. Abulkasim, A. Farouk, and Z. Jin, \"Uav-assisted IoT applications, cybersecurity threats, ai-enabled solutions, open challenges with future research directions,\" IEEE Transactions on Intelligent Vehicles, vol. 9, no. 4, pp. 4583-4605, 2024.", + "[31] W. U. Khan, E. Lagunas, Z. Ali, M. A. Javed, M. Ahmed, S. Chatzinotas, B. Ottersten, and P. Popovski, \"Opportunities for physical layer security in uav communication enhanced with intelligent reflective surfaces,\" IEEE Wireless Communications, vol. 29, no. 6, pp. 22-28, 2022.", + "[32] J. Wang, H. Du, D. Niyato, M. Zhou, J. Kang, and H. Vincent Poor, \"Acceleration estimation of signal propagation path length changes for wireless sensing,\" IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11476-11492, 2024.", + "[33] T. Wang, C.-K. Wen, H. Wang, F. Gao, T. Jiang, and S. Jin, \"Deep learning for wireless physical layer: Opportunities and challenges,\" China Communications, vol. 14, no. 11, pp. 92-111, 2017.", + "[34] J. Wang, H. Du, D. Niyato, J. Kang, S. Cui, X. Shen, and P. Zhang, \"Generative ai for integrated sensing and communication: Insights from the physical layer perspective,\" IEEE Wireless Communications, vol. 31, no. 5, pp. 246-255, 2024.", + "[35] S. Li, L. Xiao, Y. Liu, G. Liu, P. Xiao, and T. Jiang, \"Performance analysis for orthogonal time frequency space modulation systems with generalized waveform,\" China Communications, vol. 20, no. 4, pp. 57-72, 2023.", + "[36] N. Xie, W. Xiong, M. Sha, T. Hu, P. Zhang, L. Huang, and D. Niyato, \"Physical layer authentication with high compatibility using an encoding approach,\" IEEE Transactions on Communications, vol. 70, no. 12, pp. 8270-8285, 2022.", + "[37] S. Liu, T. Wang, and S. 
Wang, \"Toward intelligent wireless communications: Deep learning - based physical layer technologies,\" Digital Communications and Networks, vol. 7, no. 4, pp. 589-597, 2021. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2352864821000742", + "[38] Y. Zhang, Y. Peng, X. Tang, L. Xiao, and T. Jiang, \"Large-scale fading decoding aided user-centric cell-free massive mimo: Uplink error probability analysis and detector design,\" IEEE Transactions on Wireless Communications, vol. 23, no. 8, pp. 10336-10349, 2024.", + "[39] H. Du, J. Wang, D. Niyato, J. Kang, Z. Xiong, J. Zhang, and X. Shen, \"Semantic communications for wireless sensing: Ris-aided encoding and self-supervised decoding,\" IEEE Journal on Selected Areas in Communications, vol. 41, no. 8, pp. 2547-2562, 2023.", + "[40] P. Yang, X. Xi, K. Guo, T. Q. S. Quek, J. Chen, and X. Cao, \"Proactive uav network slicing for urllc and mobile broadband service multiplexing,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3225-3244, 2021.", + "[41] J. Huang, A. Wang, G. Sun, J. Li, J. Wang, H. Du, and D. Niyato, \"Dual uav cluster-assisted maritime physical layer secure communications via collaborative beamforming,\" IEEE Internet of Things Journal, pp. 1-1, 2024.", + "[42] Z. Duan, Z. Chang, N. Xie, W. Sun, and D. T. Niyato, \"Adaptive strategies in enhancing physical layer security: A comprehensive" + ], + "bbox": [ + 81, + 71, + 491, + 944 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "survey,\" ACM Comput. Surv., vol. 57, no. 7, Feb. 2025. [Online]. Available: https://doi.org/10.1145/3715319", + "[43] Q. Wang, Z. Chen, W. Mei, and J. Fang, \"Improving physical layer security using uav-enabled mobile relaying,\" IEEE Wireless Communications Letters, vol. 6, no. 3, pp. 310-313, 2017.", + "[44] S. Liu, H. Yang, M. Zheng, L. Xiao, Z. Xiong, and D. 
Niyato, “Uav-enabled semantic communication in mobile edge computing under jamming attacks: An intelligent resource management approach,” IEEE Transactions on Wireless Communications, vol. 23, no. 11, pp. 17 493–17 507, 2024.", + "[45] S. Bi, K. Li, S. Hu, W. Ni, C. Wang, and X. Wang, “Detection and mitigation of position spoofing attacks on cooperative uav swarm formations,” IEEE Transactions on Information Forensics and Security, vol. 19, pp. 1883–1895, 2024.", + "[46] X. Sun, D. W. K. Ng, Z. Ding, Y. Xu, and Z. Zhong, \"Physical layer security in uav systems: Challenges and opportunities,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 40-47, 2019.", + "[47] G. Zhang, Q. Hu, Y. Zhang, Y. Dai, and T. Jiang, \"Lightweight cross-domain authentication scheme for securing wireless IoT devices using backscatter communication,\" IEEE Internet of Things Journal, vol. 11, no. 12, pp. 22021-22035, 2024.", + "[48] Q. Wu, W. Mei, and R. Zhang, \"Safeguarding wireless network with uavs: A physical layer security perspective,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 12-18, 2019.", + "[49] H.-M. Wang, X. Zhang, and J.-C. Jiang, “Uav-involved wireless physical-layer secure communications: Overview and research directions,” IEEE Wireless Communications, vol. 26, no. 5, pp. 32-39, 2019.", + "[50] B. Li, Z. Fei, Y. Zhang, and M. Guizani, \"Secure uav communication networks over 5g,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 114-120, 2019.", + "[51] L. Bai, L. Zhu, J. Liu, J. Choi, and W. Zhang, \"Physical layer authentication in wireless communication networks: A survey,\" Journal of Communications and Information Networks, vol. 5, no. 3, pp. 237-264, 2020.", + "[52] N. Xie, Z. Li, and H. Tan, \"A survey of physical-layer authentication in wireless communications,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 1, pp. 282-310, 2021.", + "[53] Y. Xu, T. Zhang, D. Yang, Y. Liu, and M. 
Tao, \"Joint resource and trajectory optimization for security in uav-assisted mec systems,\" IEEE Transactions on Communications, vol. 69, no. 1, pp. 573-588, 2021.", + "[54] Y. Zhang, Z. Kuang, Y. Feng, and F. Hou, \"Task offloading and trajectory optimization for secure communications in dynamic user multi-uav mec systems,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 14427-14440, 2024.", + "[55] Y. Zhang, X. Gao, H. Yuan, K. Yang, J. Kang, P. Wang, and D. Niyato, \"Joint uav trajectory and power allocation with hybrid fso/rf for secure space-air-ground communications,\" IEEE Internet of Things Journal, vol. 11, no. 19, pp. 31407-31421, 2024.", + "[56] W. Wang, X. Li, R. Wang, K. Cumanan, W. Feng, Z. Ding, and O. A. Dobre, \"Robust 3d-trajectory and time switching optimization for dual-uav-enabled secure communications,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 11, pp. 3334-3347, 2021.", + "[57] C. Wen, L. Qiu, and X. Liang, \"Securing uav communication with mobile uav eavesdroppers: Joint trajectory and communication design,\" in 2021 IEEE Wireless Communications and Networking Conference (WCNC), 2021, pp. 1-6.", + "[58] W. Lu, Y. Ding, Y. Gao, S. Hu, Y. Wu, N. Zhao, and Y. Gong, \"Resource and trajectory optimization for secure communications in dual unmanned aerial vehicle mobile edge computing systems,\" IEEE Transactions on Industrial Informatics, vol. 18, no. 4, pp. 2704-2713, 2022.", + "[59] F. Lu, G. Liu, W. Lu, Y. Gao, J. Cao, N. Zhao, and A. Nallanathan, \"Resource and trajectory optimization for uav-relay-assisted secure maritime mec,\" IEEE Transactions on Communications, vol. 72, no. 3, pp. 1641-1652, 2024.", + "[60] A. S. Abdalla, A. Behfarnia, and V. Marojevic, \"Uav trajectory and multi-user beamforming optimization for clustered users against passive eavesdropping attacks with unknown csi,\" IEEE Transactions on Vehicular Technology, vol. 72, no. 11, pp. 14426-14442, 2023.", + "[61] Y. Ding, H. Han, W. 
Lu, Y. Wang, N. Zhao, X. Wang, and X. Yang, \"Ddqn-based trajectory and resource optimization for uav-aided mec secure communications,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 4, pp. 6006-6011, 2024.", + "[62] H. Kang, X. Chang, J. Mišić, V. B. Mišić, J. Fan, and J. Bai, “Improving dual-uav aided ground-uav bi-directional communication security: Joint uav trajectory and transmit power optimization,” IEEE Transactions on Vehicular Technology, vol. 71, no. 10, pp. 10570–10583, 2022." + ], + "bbox": [ + 513, + 71, + 919, + 944 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 905, + 31, + 919, + 40 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[63] Y. Zhang, Z. Mou, F. Gao, J. Jiang, R. Ding, and Z. Han, \"Uav-enabled secure communications by multi-agent deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 10, pp. 11599-11611, 2020.", + "[64] Y. Liu, C. Huang, G. Chen, R. Song, S. Song, and P. Xiao, “Deep learning empowered trajectory and passive beamforming design in uav-ris enabled secure cognitive non-terrestrial networks,” IEEE Wireless Communications Letters, vol. 13, no. 1, pp. 188–192, 2024.", + "[65] J. Wang, R. Wang, Z. Zheng, R. Lin, L. Wu, and F. Shu, \"Physical layer security enhancement in uav-assisted cooperative jamming for cognitive radio networks: A mappo-lstm deep reinforcement learning approach,\" IEEE Transactions on Vehicular Technology, pp. 1-14, 2024.", + "[66] X. Tang, N. Liu, R. Zhang, and Z. Han, \"Deep learning-assisted secure uav-relaying networks with channel uncertainties,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 5, pp. 5048-5059, 2022.", + "[67] X. Li, R. Yao, Y. Fan, P. Wang, and J. Xu, \"Secure efficiency map-enabled uav trajectory planning,\" IEEE Wireless Communications Letters, vol. 12, no. 8, pp. 1324-1328, 2023.", + "[68] R. Karmakar, G. Kaddoum, and O. 
Akhrif, “A novel federated learning-based smart power and 3d trajectory control for fairness optimization in secure uav-assisted mec services,” IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 4832–4848, 2024.", + "[69] Z. Li, X. Liao, J. Shi, L. Li, and P. Xiao, “Md-gan-based uav trajectory and power optimization for cognitive covert communications,” IEEE Internet of Things Journal, vol. 9, no. 12, pp. 10187-10199, 2022.", + "[70] S. Jia, L. Xiaomeng, L. Xiaomin, T. Zhuangzhuang, and H. Junfan, \"Covert leo satellite communication aided by generative adversarial network based cooperative uav jamming,\" China Communications, vol. 21, no. 9, pp. 27-39, 2024.", + "[71] C. Zhang, G. Sun, J. Li, Q. Wu, J. Wang, D. Niyato, and Y. Liu, \"Multi-objective aerial collaborative secure communication optimization via generative diffusion model-enabled deep reinforcement learning,\" IEEE Transactions on Mobile Computing, pp. 1-18, 2024.", + "[72] T. Alladi, Naren, G. Bansal, V. Chamola, and M. Guizani, \"Secauthuav: A novel authentication scheme for uav-ground station and uav-uav communication,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 15068-15077, 2020.", + "[73] R. Karmakar, G. Kaddoum, and O. Akhrif, \"A puf and fuzzy extractor-based uav-ground station and uav-uav authentication mechanism with intelligent adaptation of secure sessions,\" IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 3858-3875, 2024.", + "[74] M. Tanveer, A. Aldosary, S.-u.-d. Khokhar, A. K. Das, S. A. Aldossari, and S. A. Chaudhry, “Paf-iod: Puf-enabled authentication framework for the internet of drones,” IEEE Transactions on Vehicular Technology, vol. 73, no. 7, pp. 9560–9574, 2024.", + "[75] S. J. Maeng, Y. Yapici, i. Guvenc, A. Bhuyan, and H. Dai, “Precoder design for physical-layer security and authentication in massive mimo uav communications,” IEEE Transactions on Vehicular Technology, vol. 71, no. 3, pp. 2949–2964, 2022.", + "[76] Y. Zhou, Z. Ma, H. 
Liu, P. L. Yeoh, Y. Li, B. Vucetic, and P. Fan, \"A uav-aided physical layer authentication based on channel characteristics and geographical locations,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 1, pp. 1053–1064, 2024.", + "[77] Y. Zhou, Y. Wang, Z. Ma, P. Fan, and M. Xiao, \"Physical layer authentication for uav communications under rayleigh and rician channels,\" IEEE Transactions on Wireless Communications, pp. 1-1, 2025.", + "[78] Y.-S. Shiu, S. Y. Chang, H.-C. Wu, S. C.-H. Huang, and H.-H. Chen, \"Physical layer security in wireless networks: a tutorial,\" IEEE Wireless Communications, vol. 18, no. 2, pp. 66-74, 2011.", + "[79] J. Xu, D. Li, Z. Zhu, Z. Yang, N. Zhao, and D. Niyato, “Anti-jamming design for integrated sensing and communication via aerial iris,” IEEE Transactions on Communications, vol. 72, no. 8, pp. 4607–4619, 2024.", + "[80] B. Duo, Q. Wu, X. Yuan, and R. Zhang, “Anti-jamming 3d trajectory design for uav-enabled wireless sensor networks under probabilistic loss channel,” IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 16288-16293, 2020.", + "[81] Y. Wu, W. Yang, X. Guan, and Q. Wu, \"Energy-efficient trajectory design for uav-enabled communication under malicious jamming,\" IEEE Wireless Communications Letters, vol. 10, no. 2, pp. 206-210, 2021.", + "[82] Y. Wu, W. Yang, X. Guan, and Q. Wu, \"Uav-enabled relay communication under malicious jamming: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 70, no. 8, pp. 8275-8279, 2021." + ], + "bbox": [ + 81, + 71, + 491, + 943 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] M. A. Aref, S. K. Jayaweera, and S. Machuzak, \"Multi-agent reinforcement learning based cognitive anti-jamming,\" in 2017 IEEE Wireless Communications and Networking Conference (WCNC), 2017, pp. 1-6.", + "[84] L. Jia, F. Yao, Y. Sun, Y. Xu, S. Feng, and A. 
Anpalagan, “A hierarchical learning solution for anti-jamming stackelberg game with discrete power strategies,” IEEE Wireless Communications Letters, vol. 6, no. 6, pp. 818–821, 2017.", + "[85] X. Liu, Y. Xu, L. Jia, Q. Wu, and A. Anpalagan, “Anti-jamming communications using spectrum waterfall: A deep reinforcement learning approach,” IEEE Communications Letters, vol. 22, no. 5, pp. 998–1001, 2018.", + "[86] H. Yang, Z. Xiong, J. Zhao, D. Niyato, Q. Wu, H. V. Poor, and M. Tornatore, \"Intelligent reflecting surface assisted anti-jamming communications: A fast reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 20, no. 3, pp. 1963-1974, 2021.", + "[87] Z. Yin, Y. Lin, Y. Zhang, Y. Qian, F. Shu, and J. Li, \"Collaborative multiagent reinforcement learning aided resource allocation for uav anti-jamming communication,\" IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23995-24008, 2022.", + "[88] Y. Ma, K. Liu, Y. Liu, X. Wang, and Z. Zhao, \"An intelligent game-based anti-jamming solution using adversarial populations for aerial communication networks,\" IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2024.", + "[89] Z. Shao, H. Yang, L. Xiao, W. Su, Y. Chen, and Z. Xiong, \"Deep reinforcement learning-based resource management for uav-assisted mobile edge computing against jamming,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 13358-13374, 2024.", + "[90] Y. Zhou, P. L. Yeoh, K. J. Kim, Z. Ma, Y. Li, and B. Vucetic, \"Game theoretic physical layer authentication for spoofing detection in uav communications,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 6, pp. 6750-6755, 2022.", + "[91] Q. Cheng, Y. Zhou, H. Liu, L. Yang, Z. Ma, and P. Fan, \"Physical layer authentication in uav communications with channel randomness and jamming uncertainty,\" IEEE Transactions on Vehicular Technology, pp. 1-6, 2025.", + "[92] A. Eldosouky, A. Ferdowsi, and W. 
Saad, “Drones in distress: A game-theoretic countermeasure for protecting uavs against gps spoofing,” IEEE Internet of Things Journal, vol. 7, no. 4, pp. 2840–2854, 2020.", + "[93] D. She, W. Wang, Z. Yin, J. Wang, and H. Shan, \"Gps spoofing attack recognition for uavs with limited samples,\" IEEE Internet of Things Journal, vol. 12, no. 1, pp. 250-261, 2025.", + "[94] Y. Dang, C. Benzaid, B. Yang, T. Taleb, and Y. Shen, \"Deep-ensemble-learning-based gps spoofing detection for cellular-connected uavs,\" IEEE Internet of Things Journal, vol. 9, no. 24, pp. 25068-25085, 2022.", + "[95] X. Wang, J. Wang, Y. Xu, J. Chen, L. Jia, X. Liu, and Y. Yang, \"Dynamic spectrum anti-jamming communications: Challenges and opportunities,\" IEEE Communications Magazine, vol. 58, no. 2, pp. 79-85, 2020.", + "[96] L. Zhang, G. Ding, Q. Wu, and Z. Han, \"Spectrum sensing under spectrum misuse behaviors: A multi-hypothesis test perspective,\" IEEE Transactions on Information Forensics and Security, vol. 13, no. 4, pp. 993-1007, 2018.", + "[97] S. C. Hassler, U. A. Mughal, and M. Ismail, “Cyber-physical intrusion detection system for unmanned aerial vehicles,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 6, pp. 6106–6117, 2024.", + "[98] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"An emergent self-awareness module for physical layer security in cognitive uav radios,\" IEEE Transactions on Cognitive Communications and Networking, vol. 8, no. 2, pp. 888-906, 2022.", + "[99] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"Automatic jamming signal classification in cognitive uav radios,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 12, pp. 12972-12988, 2022.", + "[100] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, “A novel resource allocation for anti-jamming in cognitive-uavs: An active inference approach,” IEEE Communications Letters, vol. 26, no. 10, pp. 
2272–2276, 2022.", + "[101] D. Darsena, G. Gelli, I. Iudice, and F. Verde, “Detection and blind channel estimation for uav-aided wireless sensor networks in smart cities under mobile jamming attack,” IEEE Internet of Things Journal, vol. 9, no. 14, pp. 11932–11950, 2022.", + "[102] L. Zhang, G. Ding, Q. Wu, and P. Liu, “Detection of abnormal power emission in uav communication networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1179–1182, 2019." + ], + "bbox": [ + 509, + 70, + 919, + 943 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 905, + 31, + 919, + 40 + ], + "page_idx": 27 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[103] T. M. Hoang, N. M. Nguyen, and T. Q. Duong, “Detection of eavesdropping attack in uav-aided wireless systems: Unsupervised learning with one-class svm and k-means clustering,” IEEE Wireless Communications Letters, vol. 9, no. 2, pp. 139–142, 2020.", + "[104] Y. An, R. Kang, Y. Ban, and S. Yang, “Beidou receiver based on anti-jamming antenna arrays with self-calibration for precise relative positioning,” Journal of Systems Engineering and Electronics, vol. 35, no. 5, pp. 1132–1147, 2024.", + "[105] H. Sathaye and A. Ranganathan, “Semperfi: a psychoer eliminating standalone gps receiver,” in Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks, ser. WiSec '20. New York, NY, USA: Association for Computing Machinery, 2020, p. 353–355. [Online]. Available: https://doi.org/10.1145/3395351.3401703", + "[106] H. Sathaye, G. LaMountain, P. Closas, and A. Ranganathan, “Semperfi: Anti-spoofing gps receiver for uavs,” in Network and Distributed Systems Security (NDSS) Symposium 2022, 2022.", + "[107] S. Han, L. Chen, W. Meng, and C. Li, \"Improve the security of gnsss receivers through spoofing mitigation,\" IEEE Access, vol. 5, pp. 21057-21069, 2017.", + "[108] X. Ye, Y. Mao, X. Yu, S. Sun, L. Fu, and J. 
Xu, \"Integrated sensing and communications for low-altitude economy: A deep reinforcement learning approach,\" arXiv preprint arXiv:2412.04074, 2024.", + "[109] C. Huang, S. Fang, H. Wu, Y. Wang, and Y. Yang, \"Low-altitude intelligent transportation: System architecture, infrastructure, and key technologies,\" Journal of Industrial Information Integration, vol. 42, p. 100694, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2452414X24001377", + "[110] Y. Yang, Y. Chen, J. Wang, G. Sun, and D. Niyato, \"Embodied aiempowered low altitude economy: Integrated sensing, communications, computation, and control (isc3),\" arXiv preprint arXiv:2412.19996, 2024.", + "[111] J. Li, G. Sun, Q. Wu, S. Liang, J. Wang, D. Niyato, and D. I. Kim, \"Aerial secure collaborative communications under eavesdropper collusion in low-altitude economy: A generative swarm intelligent approach,\" arXiv preprint arXiv:2503.00721, 2025.", + "[112] G. Sun, W. Xie, D. Niyato, H. Du, J. Kang, J. Wu, S. Sun, and P. Zhang, \"Generative ai for advanced uav networking,\" IEEE Network, pp. 1-1, 2024.", + "[113] X. Tang, X. Li, R. Yu, Y. Wu, J. Ye, F. Tang, and Q. Chen, \"Digital-twin-assisted task assignment in multi-uav systems: A deep reinforcement learning approach,\" IEEE Internet of Things Journal, vol. 10, no. 17, pp. 15362-15375, 2023.", + "[114] X. Tang, Q. Chen, R. Yu, and X. Li, \"Digital twin-empowered task assignment in aerial mec network: A resource coalition cooperation approach with generative model,\" IEEE Transactions on Network Science and Engineering, vol. 12, no. 1, pp. 13-27, 2025.", + "[115] Y. Jiang, X. Li, G. Zhu, H. Li, J. Deng, and Q. Shi, \"6g non-terrestrial networks enabled low-altitude economy: Opportunities and challenges,\" ArXiv, vol. abs/2311.09047, 2023. [Online]. Available: https://api_semanticscholar.org/CorpusID:265213350", + "[116] X. Luo, Y. Zhang, Z. He, G. Yang, and Z. 
Ji, \"A two-step environment-learning-based method for optimal uav deployment,\" IEEE Access, vol. 7, pp. 149328-149340, 2019.", + "[117] X. Tang, Q. Chen, W. Weng, B. Liao, J. Wang, X. Cao, and X. Li, \"Dnn task assignment in uav networks: A generative ai enhanced multi-agent reinforcement learning approach,\" IEEE Internet of Things Journal, pp. 1-1, 2025.", + "[118] H. Yang, J. Zhao, Z. Xiong, K.-Y. Lam, S. Sun, and L. Xiao, \"Privacy-preserving federated learning for uav-enabled networks: Learning-based joint scheduling and resource management,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3144-3159, 2021.", + "[119] X. Cai, T. Izydorczyk, J. Rodríguez-Pineiro, I. Z. Kovács, J. Wigard, F. M. L. Tavares, and P. E. Mogensen, \"Empirical low-altitude air-to-ground spatial channel characterization for cellular networks connectivity,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 2975-2991, 2021.", + "[120] Y. Zhao, F. Zhou, L. Feng, W. Li, Y. Sun, and M. A. Imran, \"Backhaul-constrained coverage analysis of integrated high and low altitude platforms aerial communication system in post-disaster areas,\" IEEE Communications Letters, vol. 27, no. 6, pp. 1629-1633, 2023.", + "[121] S. H. Alsamhi, F. A. Almalki, F. Afghah, A. Hawbani, A. V. Shvetsov, B. Lee, and H. Song, \"Drones' edge intelligence over smart environments in b5g: Blockchain and federated learning synergy,\" IEEE Transactions on Green Communications and Networking, vol. 6, no. 1, pp. 295-312, 2022." + ], + "bbox": [ + 76, + 70, + 491, + 944 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[122] A. Ahmad, A. A. Cheema, and D. Finlay, \"A survey of radio propagation channel modelling for low altitude flying base stations,\" Computer Networks, vol. 171, p. 107122, 2020. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128619310692", + "[123] I. Bozcan and E. 
Kayacan, \"Context-dependent anomaly detection for low altitude traffic surveillance,\" in 2021 IEEE International Conference on Robotics and Automation (ICRA), 2021, pp. 224-230.", + "[124] Y. Liu, X. Gong, and Y. Yang, \"A multilayer fusion network with rotation-invariant and dynamic feature representation for multiview low-altitude image registration,\" IEEE Geoscience and Remote Sensing Letters, vol. 18, no. 6, pp. 1019-1023, 2021.", + "[125] A. Omri and M. O. Hasna, \"Physical layer security analysis of uav based communication networks,\" in 2018 IEEE 88th Vehicular Technology Conference (VTC-Fall), 2018, pp. 1-6.", + "[126] S. Samonas and D. Coss, “The cia strikes back: Redefining confidentiality, integrity and availability in security.” Journal of Information System Security, vol. 10, no. 3, 2014.", + "[127] C. Zhao, H. Du, D. Niyato, J. Kang, Z. Xiong, D. I. Kim, X. Shen, and K. B. Letaief, \"Generative ai for secure physical layer communications: A survey,\" IEEE Transactions on Cognitive Communications and Networking, vol. 11, no. 1, pp. 3-26, 2025.", + "[128] J. M. Hamamreh, H. M. Furqan, and H. Arslan, \"Classifications and applications of physical layer security techniques for confidentiality: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1773-1828, 2019.", + "[129] M. Shakiba-Herfeh, A. Chorti, and H. Vincent Poor, “Physical layer security: Authentication, integrity, and confidentiality,” Physical layer security, pp. 129–150, 2021.", + "[130] S. Hu, Q. Wu, and X. Wang, \"Energy management and trajectory optimization for uav-enabled legitimate monitoring systems,\" IEEE Transactions on Wireless Communications, vol. 20, no. 1, pp. 142-155, 2021.", + "[131] D. Wang, B. Bai, W. Zhao, and Z. Han, “A survey of optimization approaches for wireless physical layer security,” IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1878–1911, 2019.", + "[132] M. A. Arfaoui, M. D. Soltani, I. Tavakkolnia, A. 
Ghrayeb, M. Safari, C. M. Assi, and H. Haas, \"Physical layer security for visible light communication systems: A survey,\" IEEE Communications Surveys & Tutorials, vol. 22, no. 3, pp. 1887-1908, 2020.", + "[133] Z. Yin, M. Jia, N. Cheng, W. Wang, F. Lyu, Q. Guo, and X. Shen, \"Uav-assisted physical layer security in multi-beam satellite-enabled vehicle communications,\" IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 3, pp. 2739-2751, 2022.", + "[134] X. Fang, N. Zhang, S. Zhang, D. Chen, X. Sha, and X. Shen, \"On physical layer security: Weighted fractional fourier transform based user cooperation,\" IEEE Transactions on Wireless Communications, vol. 16, no. 8, pp. 5498-5510, 2017.", + "[135] W. Tian, X. Ding, G. Liu, Y. Dai, and Z. Han, “A uav-assisted secure communication system by jointly optimizing transmit power and trajectory in the internet of things,” IEEE Transactions on Green Communications and Networking, vol. 7, no. 4, pp. 2025–2037, 2023.", + "[136] F. Irram, M. Ali, M. Naeem, and S. Mumtaz, \"Physical layer security for beyond 5g/6g networks: Emerging technologies and future directions,\" Journal of Network and Computer Applications, vol. 206, p. 103431, 2022. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S108480452200087X", + "[137] W. Lu, P. Si, F. Lu, B. Li, Z. Liu, S. Hu, and Y. Gong, \"Resource and trajectory optimization in uav-powered wireless communication system,\" Science China Information Sciences, vol. 64, no. 4, p. 140304, Mar 2021, accessed: 2025-01-03. [Online]. Available: https://doi.org/10.1007/s11432-020-3060-4", + "[138] J. Luo, Z. Wang, M. Xia, L. Wu, Y. Tian, and Y. Chen, \"Path planning for uav communication networks: Related technologies, solutions, and opportunities,\" ACM Comput. Surv., vol. 55, no. 9, Jan. 2023. [Online]. Available: https://doi-org.remotexs.ntu.edu.sg/10.1145/3560261", + "[139] A. V. Savkin, H. Huang, and W. 
Ni, “Securing uav communication in the presence of stationary or mobile eavesdroppers via online 3d trajectory planning,” IEEE Wireless Communications Letters, vol. 9, no. 8, pp. 1211–1215, 2020.", + "[140] X. Zhou, Q. Wu, S. Yan, F. Shu, and J. Li, \"Uav-enabled secure communications: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 68, no. 4, pp. 4069-4073, 2019.", + "[141] R. Ding, F. Gao, and X. S. Shen, \"3d uav trajectory design and frequency band allocation for energy-efficient and fair communication: A deep reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 19, no. 12, pp. 7796-7809, 2020." + ], + "bbox": [ + 506, + 70, + 919, + 944 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 905, + 30, + 919, + 40 + ], + "page_idx": 28 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[142] C. Zhong, J. Yao, and J. Xu, \"Secure uav communication with cooperative jamming and trajectory control,\" IEEE Communications Letters, vol. 23, no. 2, pp. 286-289, 2019.", + "[143] Y. Bai, H. Zhao, X. Zhang, Z. Chang, R. Jantti, and K. Yang, \"Toward autonomous multi-uav wireless network: A survey of reinforcement learning-based approaches,\" IEEE Communications Surveys & Tutorials, vol. 25, no. 4, pp. 3038-3067, 2023.", + "[144] R. Dong, B. Wang, K. Cao, J. Tian, and T. Cheng, \"Secure transmission design of ris enabled uav communication networks exploiting deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 6, pp. 8404-8419, 2024.", + "[145] X. Tang, T. Jiang, J. Liu, B. Li, D. Zhai, F. R. Yu, and Z. Han, \"Secure communication with uav-enabled aerial ris: Learning trajectory with reflection optimization,\" IEEE Transactions on Intelligent Vehicles, pp. 1-10, 2023.", + "[146] J. Duan, Y. Guan, S. E. Li, Y. Ren, Q. Sun, and B. 
Cheng, \"Distribu-tional soft actor-critic: Off-policy reinforcement learning for addressing value estimation errors,\" IEEE Transactions on Neural Networks and Learning Systems, vol. 33, no. 11, pp. 6584-6598, 2022.", + "[147] W. Chen, X. Qiu, T. Cai, H.-N. Dai, Z. Zheng, and Y. Zhang, “Deep reinforcement learning for internet of things: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 23, no. 3, pp. 1659–1692, 2021.", + "[148] F. Tang, H. Hofner, N. Kato, K. Kaneko, Y. Yamashita, and M. Hangai, “A deep reinforcement learning-based dynamic traffic offloading in space-air-ground integrated networks (sagin),” IEEE Journal on Selected Areas in Communications, vol. 40, no. 1, pp. 276–289, 2022.", + "[149] N. Yang, S. Chen, H. Zhang, and R. Berry, “Beyond the edge: An advanced exploration of reinforcement learning for mobile edge computing, its applications, and future research trajectories,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024.", + "[150] Q. Mao, F. Hu, and Q. Hao, “Deep learning for intelligent wireless networks: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 20, no. 4, pp. 2595–2621, 2018.", + "[151] P. Consul, I. Budhiraja, and D. Garg, \"A hybrid secure resource allocation and trajectory optimization approach for mobile edge computing using federated learning based on web 3.0,\" IEEE Transactions on Consumer Electronics, vol. 70, no. 1, pp. 1167-1179, 2024.", + "[152] X. Hou, J. Wang, Z. Zhang, J. Wang, L. Liu, and Y. Ren, \"Split federated learning for uav-enabled integrated sensing, computation, and communication,\" arXiv preprint arXiv:2504.01443, 2025.", + "[153] K. Heo, W. Lee, and K. Lee, “Uav-assisted wireless-powered secure communications: Integration of optimization and deep learning,” IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 10530–10545, 2024.", + "[154] U. A. Mughal, Y. Alkhrijah, A. Almadhor, and C. 
Yuen, “Deep learning for secure uav-assisted ris communication networks,” IEEE Internet of Things Magazine, vol. 7, no. 2, pp. 38-44, 2024.", + "[155] R. Dong, B. Wang, and K. Cao, \"Deep learning driven 3d robust beamforming for secure communication of uav systems,\" IEEE Wireless Communications Letters, vol. 10, no. 8, pp. 1643-1647, 2021.", + "[156] M. Chen, U. Challita, W. Saad, C. Yin, and M. Debbah, \"Artificial neural networks-based machine learning for wireless networks: A tutorial,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3039-3071, 2019.", + "[157] M. T. Nguyen and L. B. Le, “Multi-uav trajectory control, resource allocation, and nomai user pairing for uplink energy minimization,” IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23 728–23 740, 2022.", + "[158] X. Liao, J. Shi, Z. Li, L. Zhang, and B. Xia, “A model-driven deep reinforcement learning heuristic algorithm for resource allocation in ultra-dense cellular networks,” IEEE Transactions on Vehicular Technology, vol. 69, no. 1, pp. 983–997, 2020.", + "[159] X. Liao, J. Si, J. Shi, Z. Li, and H. Ding, \"Generative adversarial network assisted power allocation for cooperative cognitive covert communication system,\" IEEE Communications Letters, vol. 24, no. 7, pp. 1463-1467, 2020.", + "[160] Y. Zhou, P. L. Yeoh, H. Chen, Y. Li, R. Schober, L. Zhuo, and B. Vucetic, \"Improving physical layer security via a uav friendly jammer for unknown eavesdropper location,\" IEEE Transactions on Vehicular Technology, vol. 67, no. 11, pp. 11280-11284, 2018.", + "[161] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P.-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, vol. 36, no. 7, pp. 2814–2830, 2024.", + "[162] D. Chen, N. Zhang, N. Cheng, K. Zhang, Z. Qin, and X. 
Shen, \"Physical layer based message authentication with secure channel" + ], + "bbox": [ + 76, + 71, + 491, + 943 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "codes,\" IEEE Transactions on Dependable and Secure Computing, vol. 17, no. 5, pp. 1079-1093, 2020.", + "[163] G. Bansal and B. Sikdar, “S-maps: Scalable mutual authentication protocol for dynamic uav swarms,” IEEE Transactions on Vehicular Technology, vol. 70, no. 11, pp. 12088-12100, 2021.", + "[164] B. Chatterjee, D. Das, S. Maity, and S. Sen, \"Rf-puf: Enhancing iot security through authentication of wireless nodes using in-situ machine learning,\" IEEE Internet of Things Journal, vol. 6, no. 1, pp. 388-398, 2019.", + "[165] G. Bansal, N. Naren, V. Chamola, B. Sikdar, N. Kumar, and M. Guizani, \"Lightweight mutual authentication protocol for v2g using physical unclonable function,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 7, pp. 7234-7246, 2020.", + "[166] C. Pu, A. Wall, K.-K. R. Choo, I. Ahmed, and S. Lim, \"A lightweight and privacy-preserving mutual authentication and key agreement protocol for internet of drones environment,\" IEEE Internet of Things Journal, vol. 9, no. 12, pp. 9918-9933, 2022.", + "[167] Z. Zhang, C. Hsu, M. H. Au, L. Harn, J. Cui, Z. Xia, and Z. Zhao, \"Prlap-iod: A puf-based robust and lightweight authentication protocol for internet of drones,\" Computer Networks, vol. 238, p. 110118, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128623005637", + "[168] J. Liu and X. Wang, \"Physical layer authentication enhancement using two-dimensional channel quantization,\" IEEE Transactions on Wireless Communications, vol. 15, no. 6, pp. 4171-4182, 2016.", + "[169] X. Lu, J. Lei, Y. Shi, and W. Li, \"Improved physical layer authentication scheme based on wireless channel phase,\" IEEE Wireless Communications Letters, vol. 11, no. 1, pp. 198-202, 2022.", + "[170] N. Xie, J. Chen, and L. 
Huang, “Physical-layer authentication using multiple channel-based features,” IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2356-2366, 2021.", + "[171] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, and B. Vucetic, \"Signal-to-noise ratio based physical layer authentication in uav communications,\" in 2023 IEEE 34th Annual International Symposium on Personal, Indoor and Mobile Radio Communications (PIMRC), 2023, pp. 1-6.", + "[172] Y. Shang, Y. Peng, R. Ye, and J. Lee, “Ris-assisted secure uav communication scheme against active jamming and passive eavesdropping,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 11, pp. 16953-16963, 2024.", + "[173] Y. Wu, X. Guan, W. Yang, and Q. Wu, “Uav swarm communication under malicious jamming: Joint trajectory and clustering design,” IEEE Wireless Communications Letters, vol. 10, no. 10, pp. 2264–2268, 2021.", + "[174] Z. Shen, K. Xu, and X. Xia, \"Beam-domain anti-jamming transmission for downlink massive mimo systems: A stackelberg game perspective,\" IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2727-2742, 2021.", + "[175] X. Li, J. Chen, X. Ling, and T. Wu, “Deep reinforcement learning-based anti-jamming algorithm using dual action network,” IEEE Transactions on Wireless Communications, vol. 22, no. 7, pp. 4625–4637, 2023.", + "[176] L. Jia, N. Qi, F. Chu, S. Fang, X. Wang, S. Ma, and S. Feng, \"Game-theoretic learning anti-jamming approaches in wireless networks,\" IEEE Communications Magazine, vol. 60, no. 5, pp. 60-66, 2022.", + "[177] F. Yao and L. Jia, “A collaborative multi-agent reinforcement learning anti-jamming algorithm in wireless networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1024–1027, 2019.", + "[178] E. Schmidt, N. Gatsis, and D. Akopian, “A gps spoofing detection and classification correlator-based technique using the lasso,” IEEE Transactions on Aerospace and Electronic Systems, vol. 56, no. 6, pp. 
4224–4237, 2020.", + "[179] B. Pardhasaradhi and L. R. Cenkeramaddi, \"Gps spoofing detection and mitigation for drones using distributed radar tracking and fusion,\" IEEE Sensors Journal, vol. 22, no. 11, pp. 11 122-11 134, 2022.", + "[180] Z. Chen, J. Li, J. Li, X. Zhu, and C. Li, \"Gnss multiparameter spoofing detection method based on support vector machine,\" IEEE Sensors Journal, vol. 22, no. 18, pp. 17864-17874, 2022.", + "[181] X. Chen, D. He, X. Yan, W. Yu, and T.-K. Truong, \"Gnss interference type recognition with fingerprint spectrum dnn method,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 58, no. 5, pp. 4745-4760, 2022.", + "[182] Y. Dang, C. Benzaïd, Y. Shen, and T. Taleb, \"Gps spoofing detector with adaptive trustable residence area for cellular based-uavs,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-6.", + "[183] V. Chandola, A. Banerjee, and V. Kumar, \"Anomaly detection: A survey,\" ACM Comput. Surv., vol. 41, no. 3, Jul. 2009. [Online]. Available: https://doi.org/10.1145/1541880.1541882" + ], + "bbox": [ + 506, + 71, + 919, + 943 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 906, + 30, + 919, + 40 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[184] B. Balaji and K. Friston, \"Bayesian state estimation using generalized coordinates,\" Signal processing, sensor fusion, and target recognition XX, vol. 8050, pp. 716-727, 2011.", + "[185] M. Baydoun, D. Campo, V. Sanguineti, L. Marcenaro, A. Cavallaro, and C. Regazzoni, “Learning switching models for abnormality detection for autonomous driving,” in 2018 21st International Conference on Information Fusion (FUSION), 2018, pp. 2606–2613.", + "[186] L. Pardo, Statistical inference based on divergence measures. Chapman and Hall/CRC, 2018.", + "[187] A. Krayani, M. Baydoun, L. Marcenaro, A. S. Alam, and C. 
Regazzoni, \"Self-learning bayesian generative models for jammer detection in cognitive-uav-radios,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-7.", + "[188] W. Xie, G. Sun, J. Wang, H. Du, J. Kang, K. Huang, and V. Leung, “Multi-objective aerial iris-assisted isac optimization via generative ai-enhanced deep reinforcement learning,” arXiv preprint arXiv:2502.10687, 2025.", + "[189] J. Wang, H. Du, Y. Liu, G. Sun, D. Niyato, S. Mao, D. I. Kim, and X. Shen, \"Generative ai based secure wireless sensing for isac networks,\" arXiv preprint arXiv:2408.11398, 2024.", + "[190] X. Wang, C. P. Tan, Y. Wang, and X. Wang, “Defending uav networks against covert attacks using auxiliary signal injections,” IEEE Transactions on Automation Science and Engineering, pp. 1–13, 2024.", + "[191] M. Valkama, M. Renfors, and V. Koivunen, “Advanced methods for i/q imbalance compensation in communication receivers,” IEEE Transactions on Signal Processing, vol. 49, no. 10, pp. 2335–2344, 2001.", + "[192] J. Zhang and Y. R. Zheng, \"Frequency-domain turbo equalization with soft successive interference cancellation for single carrier mimo underwater acoustic communications,\" IEEE Transactions on Wireless Communications, vol. 10, no. 9, pp. 2872-2882, 2011.", + "[193] P. Madhani, P. Axelrad, K. Krumvieda, and J. Thomas, \"Application of successive interference cancellation to the gps pseudolite near-far problem,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 39, no. 2, pp. 481-488, 2003.", + "[194] P. Patel and J. Holtzman, \"Analysis of a simple successive interference cancellation scheme in a ds/cdma system,\" IEEE Journal on Selected Areas in Communications, vol. 12, no. 5, pp. 796-807, 1994.", + "[195] M. L. Psiaki and T. E. Humphreys, “Gnss spoofing and detection,” Proceedings of the IEEE, vol. 104, no. 6, pp. 1258–1270, 2016.", + "[196] T. E. 
Humphreys, “Detection strategy for cryptographic gnss anti-spoofing,” IEEE Transactions on Aerospace and Electronic Systems, vol. 49, no. 2, pp. 1073–1090, 2013.", + "[197] Z. Wu, R. Liu, and H. Cao, \"Ecdsa-based message authentication scheme for beidou-ii navigation satellite system,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 55, no. 4, pp. 1666-1682, 2019.", + "[198] K. Wesson, M. Rothlisberger, and T. Humphreys, “Practical cryptographic civilgps signal authentication,” NAVIGATION: Journal of the Institute of Navigation, vol. 59, no. 3, pp. 177–193, 2012.", + "[199] A. Ranganathan, H. Olafsdóttir, and S. Capkun, \"Spree: a spoofing resistant gps receiver,\" in Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking, ser. MobiCom '16. New York, NY, USA: Association for Computing Machinery, 2016, p. 348-360. [Online]. Available: https://doi.org/10.1145/2973750.2973753", + "[200] M. Ahmed, A. A. Soofi, S. Raza, F. Khan, S. Ahmad, W. U. Khan, M. Asif, F. Xu, and Z. Han, “Advancements in ris-assisted UAV for empowering multiaccess edge computing: A survey,” IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6325–6346, 2025.", + "[201] G. K. Pandey, D. S. Gurjar, S. Yadav, Y. Jiang, and C. Yuen, “Uav-assisted communications with rf energy harvesting: A comprehensive survey,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024.", + "[202] P. Cao, L. Lei, S. Cai, G. Shen, X. Liu, X. Wang, L. Zhang, L. Zhou, and M. Guizani, \"Computational intelligence algorithms for uav swarm networking and collaboration: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 26, no. 4, pp. 2684-2728, 2024.", + "[203] P. Li, H. Zhang, Y. Wu, L. Qian, R. Yu, D. Niyato, and X. Shen, \"Filling the missing: Exploring generative ai for enhanced federated learning over heterogeneous mobile edge devices,\" IEEE Transactions on Mobile Computing, vol. 23, no. 10, pp. 10001-10015, 2024.", + "[204] J. 
Wang, Y. Liu, H. Du, D. Niyato, J. Kang, H. Zhou, and D. I. Kim, \"Empowering wireless networks with artificial intelligence generated graph,\" arXiv preprint arXiv:2405.04907, 2024." + ], + "bbox": [ + 76, + 71, + 491, + 943 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[205] M. Xu, D. Niyato, J. Kang, Z. Xiong, S. Mao, Z. Han, D. I. Kim, and K. B. Letaief, \"When large language model agents meet 6g networks: Perception, grounding, and alignment,\" IEEE Wireless Communications, vol. 31, no. 6, pp. 63-71, 2024.", + "[206] R. Zhang, H. Du, D. Niyato, J. Kang, Z. Xiong, P. Zhang, and D. I. Kim, \"Optimizing generative ai networking: A dual perspective with multi-agent systems and mixture of experts,\" arXiv preprint arXiv:2405.12472, 2024.", + "[207] A. H. Arani, P. Hu, and Y. Zhu, “Uav-assisted space-air-ground integrated networks: A technical review of recent learning algorithms,” IEEE Open Journal of Vehicular Technology, vol. 5, pp. 1004–1023, 2024.", + "[208] N. T. T. Van, N. L. Tuan, N. C. Luong, T. H. Nguyen, S. Feng, S. Gong, D. Niyato, and D. I. Kim, \"Network access selection for urclc and embb applications in sub-6ghz-mmwave-thz networks: Game theory versus multi-agent reinforcement learning,\" IEEE Transactions on Communications, pp. 1-1, 2024.", + "[209] Q. Yuan, L. Xiao, C. He, P. Xiao, and T. Jiang, \"Deep learning-based hybrid precoding for ris-aided broadband terahertz communication systems in the face of beam squint,\" IEEE Wireless Communications Letters, vol. 13, no. 2, pp. 303-307, 2024.", + "[210] G. Geraci, A. Garcia-Rodriguez, M. M. Azari, A. Lozano, M. Mezzavilla, S. Chatzinotas, Y. Chen, S. Rangan, and M. D. Renzo, \"What will the future of uav cellular communications be? a flight from 5g to 6g,\" IEEE Communications Surveys & Tutorials, vol. 24, no. 3, pp. 1304-1335, 2022." 
+ ], + "bbox": [ + 506, + 71, + 919, + 364 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 905, + 31, + 919, + 40 + ], + "page_idx": 30 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_model.json b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4666c9517d59154ae1a8ce70f20c75000530e8e6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_model.json @@ -0,0 +1,5894 @@ +[ + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.06, + 0.721 + ], + "angle": 270, + "content": "arXiv:2504.09153v1 [cs.CR] 12 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.12, + 0.071, + 0.877, + 0.141 + ], + "angle": 0, + "content": "Secure Physical Layer Communications for Low-Altitude Economy Networking: A Survey" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.148, + 0.908, + 0.184 + ], + "angle": 0, + "content": "Lingyi Cai, Jiacheng Wang, Ruichen Zhang, Yu Zhang, Tao Jiang, Fellow, IEEE, Dusit Niyato, Fellow, IEEE, Xianbin Wang, Fellow, IEEE, Abbas Jamalipour, Fellow, IEEE, and Xuemin Shen, Fellow, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.236, + 0.493, + 0.552 + ], + "angle": 0, + "content": "Abstract—The Low-Altitude Economy Networking (LAENet) is emerging as a transformative paradigm that enables an integrated and sophisticated communication infrastructure to support aerial vehicles in carrying out a wide range of economic activities within low-altitude airspace. However, the physical layer communications in the LAENet face growing security threats due to inherent characteristics of aerial communication environments, such as signal broadcast nature and channel openness. 
These challenges highlight the urgent need for safeguarding communication confidentiality, availability, and integrity. In view of the above, this survey comprehensively reviews existing secure countermeasures for physical layer communication in the LAENet. We explore core methods focusing on anti-eavesdropping and authentication for ensuring communication confidentiality. Subsequently, availability-enhancing techniques are thoroughly discussed for anti-jamming and spoofing defense. Then, we review approaches for safeguarding integrity through anomaly detection and injection protection. Furthermore, we discuss future research directions, emphasizing energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. This survey may provide valuable references and new insights for researchers in the field of secure physical layer communication for the LAENet." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.558, + 0.492, + 0.599 + ], + "angle": 0, + "content": "Index Terms—Low-altitude economy networking, secure physical layer communications, communication confidentiality, communication availability, communication integrity." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.616, + 0.352, + 0.63 + ], + "angle": 0, + "content": "I. 
INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.635, + 0.493, + 0.696 + ], + "angle": 0, + "content": "WITH the rapid development of aerial vehicle technologies and communication networks, the concept of Low-Altitude Economic Networking (LAENet) has emerged to enable more comprehensive, large-scale, and intelligent" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.705, + 0.493, + 0.763 + ], + "angle": 0, + "content": "Lingyi Cai is with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China, and also with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: lingyicai@hust.edu.cn)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.762, + 0.493, + 0.809 + ], + "angle": 0, + "content": "Jiacheng Wang, Ruichen Zhang, and Dusit Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mails: jiacheng.wang@ntu.edu.sg; ruichen.zhang@ntu.edu.sg; dniyato@ntu.edu.sg)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.808, + 0.493, + 0.855 + ], + "angle": 0, + "content": "Yu Zhang and Tao Jiang are with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China (e-mail: yuzhang123@hust.edu.cn; tao.jiang@ieee.org)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.853, + 0.493, + 0.888 + ], + "angle": 0, + "content": "Xianbin Wang is with the Department of Electrical and Computer Engineering, Western University, London, ON, N6A 5B9, Canada (e-mail: xianbin.wang@uwo.ca)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.887, + 0.493, + 0.91 + ], + "angle": 0, + "content": "Abbas Jamalipour is with the School of Electrical and Computer Engineering, University of Sydney, Australia (e-mail: a.jamalipour@ieee.org)." 
+ }, + { + "type": "text", + "bbox": [ + 0.075, + 0.91, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Xuemin Shen is with the Department of Electrical and Computer Engineering, University of Waterloo, Waterloo, ON N2L 3G1, Canada (e-mail: sshen@uwaterloo.ca)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.235, + 0.923, + 0.507 + ], + "angle": 0, + "content": "connectivity to support various low-altitude activities [1]–[4], such as intelligent transportation, logistics delivery, communication enhancement, disaster monitoring, and emergency response [5]–[8], as shown in Fig. 1. The LAENet is built upon earlier frameworks of single Unmanned Aerial Vehicle (UAV) operation and multi-UAV networks. A single UAV typically maintains a direct link to a ground station or base station, operating with simple control procedures and delivering cost-effective services but with limited range and scalability [9]. The UAV network focuses on formation control and multi-UAV collaboration, enabling broader mission areas and stronger fault tolerance [9]–[11]. Advancing from these foundations, the LAENet integrates various aerial vehicles into a high-density communication network, connecting them not only to ground stations but also to other platforms such as base stations, access points, and even satellites [12], [13]. Thus, the LAENet can enable ubiquitous coverage, high reliability, robust fault tolerance, greater autonomy, and intelligence." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.508, + 0.923, + 0.853 + ], + "angle": 0, + "content": "Specifically, the LAENet refers to an integrated network system that connects various low-altitude flight operations, including general aviation, drones, electric vertical take-off and landing (eVTOL) aircraft, and other aerial platforms, within the designated low-altitude airspace (typically below 1,000 meters, and in some cases extending up to 3,000 meters) [1], [13]. 
The LAENet serves as a vital bridge between ground-based economies and airspace resources, which will drive technological innovation and unlock substantial social and economic benefits [14], [15]. The Civil Aviation Administration of China estimates that the country's low-altitude market will soar from 500 billion Chinese yuan (about 70 billion US dollars) in 2023 to 1.5 trillion Chinese yuan (about 200 billion US dollars) in 2025 and as much as 3.5 trillion Chinese yuan (about 480 billion US dollars) in 2035 [16]. Currently, research institutions and enterprises across multiple regions in China are continuously advancing and expanding innovative research and commercial applications of UAVs and eVTOLs in low-altitude activities [17]. Meanwhile, in the United States, the Federal Aviation Administration has confirmed its commitment to actively promoting the development of electric air taxis and integrating this type of aircraft into the national airspace [18]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.924, + 0.947 + ], + "angle": 0, + "content": "In the LAENet, physical layer communication serves as a critical foundation for wireless communication between aerial vehicles and between aerial vehicles and communication infrastructure [10], [28], [31]. The physical layer converts digital data from higher protocol layers into signals suitable for transmission over aerial communication channels [32]–[34]." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.068, + 0.92, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.419, + 0.924, + 0.456 + ], + "angle": 0, + "content": "Fig. 1. 
The overall architecture of the LAENet covers the main application scenarios, including emergency monitoring and response, temporary communication relay, communication coverage expansion, low-altitude smart logistics, and urban air mobility. The table compares the similarities and differences between the LAENet, single UAV, and UAV networks, representing the evolution of the LAENet." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.481, + 0.492, + 0.692 + ], + "angle": 0, + "content": "This process encompasses encoding data into bit sequences, modulating them onto carrier waves, and ensuring reliable signal propagation through the wireless medium [32], [35], [36]. At the receiver side, the physical layer performs inverse operations, including demodulating the incoming signals, decoding the bit sequences, and passing the data to upper layers for further processing [37]–[39]. Therefore, the physical layer supports the core communication mechanisms in the LAENet and plays a crucial role in its aerial deployment. For example, aerial vehicles deployed as aerial base stations (ABSs) or aerial relays can overcome interference, signal distortion, and environmental variations inherent in communication links by using physical layer functionalities such as channel access, multiplexing, and channel equalization [33], [40], [41]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.703, + 0.493, + 0.945 + ], + "angle": 0, + "content": "However, physical layer communication in the LAENet is exposed to a variety of security threats due to the inherent characteristics of aerial communication environments [42]. The broadcast nature of wireless signals and the prevalence of line-of-sight (LoS) propagation make aerial links particularly vulnerable to eavesdropping, jamming, and spoofing attacks [1], [43]. These attacks can compromise communication confidentiality, disrupt communication, or deceive aerial vehicles by impersonating legitimate transmitters [44], [45]. 
Furthermore, the openness of wireless channels and weak authentication mechanisms increase the risk of unauthorized access and injection attacks, allowing adversaries to infiltrate the network or inject malicious signals [46], [47]. Additionally, the open medium and dynamic spectrum access may cause anomalous behaviors to disrupt normal communication operations in the LAENet [48], [49]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.481, + 0.922, + 0.585 + ], + "angle": 0, + "content": "Confronted with these substantial security challenges, this paper conducts a comprehensive analysis on physical layer communications of the LAENet and provides a thorough survey of technologies and solutions to address communication confidentiality, availability, and integrity. Table II gives a clear structure for showing existing efforts on secure physical layer communications for the LAENet." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.608, + 0.64, + 0.624 + ], + "angle": 0, + "content": "A. Related Surveys" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.628, + 0.924, + 0.947 + ], + "angle": 0, + "content": "Recently, a number of excellent survey and tutorial papers have overviewed security issues in UAV networks and communications and have summarized corresponding countermeasures and solutions, as shown in Table I. Some works consider security issues at the system level including intrusion, privacy, and trust issues. The work in [19] provides a comprehensive review of security threats facing UAVs and UAV networks, including communication vulnerabilities, sensor spoofing, jamming, and malware attacks. It examines various countermeasures such as encryption, global positioning system (GPS) spoofing mitigation, and firmware signing. A gap analysis is performed to identify remaining security vulnerabilities and provide recommendations for future UAV development. 
The study in [20] conducts a comprehensive review of security issues in UAV swarm networks, examining various potential attacks such as communication attacks, identity-based attacks, resource attacks, routing attacks, data attacks, and machine learning (ML) attacks. It categorizes these threats and presents corresponding security technologies and countermeasures, including cryptography, physical layer security techniques, blockchain, machine learning, and intrusion detection" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.072, + 0.397, + 0.094 + ], + "angle": 0, + "content": "TABLEI SUMMARY OF RELATED SURVEYS" + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.104, + 0.487, + 0.532 + ], + "angle": 0, + "content": "
ReferencesFocus
[19]A review of cybersecurity threats, countermeasures, and research gaps in UAV networks, with a focus on emerging attack surfaces and commercial UAV applications
[20]A survey of security threats, vulnerabilities, and countermeasures in UAV swarm networks, with a focus on classifying attack types and reviewing emerging defense technologies
[21]A review of security threats, vulnerabilities, and countermeasures in UAVs and Flying Ad Hoc Networks with attack surface analysis with simulation-based evaluation
[22]A survey of vulnerabilities across software, hardware, and communication layers in UAV systems, and an exploration of emerging defense technologies
[23]A survey of security challenges in drone communication and a review of emerging technologies used to enhance the speed, reliability, and security of UAV networks
[24]A review of UAV security challenges, existing controls, and future research directions, with an emphasis on the transformative role of AI in enabling secure UAV systems
[25]A review of security threats classified from a cyberspace security perspective and countermeasures in UAV systems
[26]A survey of security threats, requirements, and counter-measures in UAV-aided Internet of Things (IoT) applications
[27]A survey of cybersecurity vulnerabilities and countermeasures in UAV systems, integrating threat classification, communication protocols, and emerging techniques
[28]A survey of PLS in UAV communications, focusing on key challenges, methodologies, and recent advancements for both static and mobile UAV deployment scenarios
[29]A review of security challenges, practical deployment aspects, and standardization progress associated with integrating UAVs into cellular networks
[30]A survey of layer-wise cybersecurity threats and AI-enabled countermeasures in UAV-assisted IoT applications
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.565, + 0.492, + 0.716 + ], + "angle": 0, + "content": "systems. The authors in [21] provide a detailed examination of security challenges in UAVs and FANETs, covering various attack vectors including communication, identity-based, resource, routing, data, and machine learning attacks. The study in [22] examines security and privacy vulnerabilities in UAV systems across hardware, software, and communication layers. It discusses various threats such as eavesdropping and jamming attacks, and presents defense mechanisms including blockchain, machine learning-based intrusion detection, and secure communication protocols." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.719, + 0.493, + 0.947 + ], + "angle": 0, + "content": "Some studies emphasize cyber security challenges within UAV networks. The study in [23] comprehensively reviews security issues in drone communication, including Denial of Service (DoS), GPS spoofing, and man-in-the-middle attacks. It examines vulnerabilities across different drone applications and presents countermeasures using blockchain, software-defined networks, machine learning, and fog computing. The authors of [24] provide a comprehensive survey of security challenges in UAV systems, including various types of attacks, privacy concerns, and trust issues. It identifies current research trends and gaps while establishing a future roadmap with a focus on artificial intelligence (AI)'s potential to enhance UAV security. 
The authors in [25] provide a comprehensive review of security issues in UAV networks, examining various potential attacks such as spoofing, replay, jamming, and" + }, + { + "type": "title", + "bbox": [ + 0.685, + 0.072, + 0.742, + 0.083 + ], + "angle": 0, + "content": "TABLE II" + }, + { + "type": "title", + "bbox": [ + 0.626, + 0.085, + 0.802, + 0.094 + ], + "angle": 0, + "content": "CHALLENGES AND SOLUTIONS" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.096, + 0.902, + 0.14 + ], + "angle": 0, + "content": "RED CIRCLES DESCRIBE THE SECURITY ISSUES; GREEN CIRCLES REPRESENT THE OVERALL COUNTERMEASURES FOR THE SECURITY ISSUES; GREEN CHECK MARKERS INDICATE DIFFERENT TYPES OF SOLUTIONS UNDER EACH COUNTERMEASURE" + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.149, + 0.921, + 0.496 + ], + "angle": 0, + "content": "
Section III, Challenge 1: Communication confidentiality
Issues● Eavesdropping attack [46], [50]● Unauthorized access [31], [51], [52]
Solutions● Anti-eavesdropping strategies√ Convex optimization-based strategies [53]–[59]√ Reinforcement learning-based strategies [60]–[65]√ Deep learning-based strategies [66]–[71]● Communication authentication√ PUFs-based authentication [72]–[74]√ Channel based-authentication [75]–[77]
Section IV, Challenge 2: Communication availability
Issues● Jamming attack [48], [78], [79]● Spoofing attack [49], [50], [52], [78]
Solutions● Anti-jamming strategies√ Convex optimization [80]–[82]√ Single-agent RL [83]–[86]√ Multi-agent RL [87]–[89]● Spoofing defense√ PLA [77], [90], [91]√ GNSS spoofing detection [92]–[94]
Section V, Challenge 3: Communication Integrity
Issues● Anomalous behaviors [61], [95], [96]● Injection attacks [28], [46], [97]
Solutions● Anomaly detection√ Jamming anomaly detection [98]–[101]√ Abnormal power detection [102]√ Eavesdropping anomaly detection [103]● Injection defense√ Jamming signal injection defense [98], [101], [104]● Spoofing signal injection defense [105]–[107]
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.545, + 0.922, + 0.651 + ], + "angle": 0, + "content": "eavesdropping attacks. It categorizes these threats and presents corresponding security technologies and countermeasures. The study in [26] provides a comprehensive review of security issues in UAV-aided IoT applications and presents corresponding security technologies and countermeasures. The work in [27] reviews cybersecurity threats affecting UAV systems and evaluates existing countermeasures in enhancing UAV security." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.658, + 0.922, + 0.946 + ], + "angle": 0, + "content": "In addition, some surveys analyze the challenges faced by UAV systems from a layered perspective (e.g., physical layer, link layer, network layer, application layer). The work in [28] deeply reviews the current state of physical layer security (PLS) in UAV communications, examining unique air-to-ground channel characteristics, static and mobile UAV deployment scenarios, and various security enhancement techniques. The work in [29] presents a comprehensive overview of UAV cellular communications, covering the classification of consumer drones, the concept and potential of UAV-mounted flying base stations. It explores the integration of UAVs into cellular networks as novel user equipment and addresses key challenges related to interference, regulatory compliance, and security. The authors of [30] review the cybersecurity landscape of UAV-assisted IoT applications, examining layer-wise security threats from physical to application layers. It explores how AI, ML, deep learning (DL), and reinforcement learning (RL) techniques have been employed to address authentication, data privacy, and attack prevention challenges." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.04 + ], + "angle": 0, + "content": "4" + }, + { + "type": "table_caption", + "bbox": [ + 0.468, + 0.072, + 0.531, + 0.082 + ], + "angle": 0, + "content": "TABLE III" + }, + { + "type": "table_caption", + "bbox": [ + 0.415, + 0.083, + 0.584, + 0.095 + ], + "angle": 0, + "content": "LIST OF ABBREVIATIONS" + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.104, + 0.818, + 0.418 + ], + "angle": 0, + "content": "
AbbreviationDescriptionAbbreviationDescription
A2GAir-to-groundABSAerial Base Station
ANArtificial NoiseAIArtificial Intelligence
BCDBlock Coordinate DescentBSBase Station
CNNConvolutional Neural NetworkCSIChannel State Information
DDPGDeep Deterministic Policy GradientDDQNDouble-deep Q-Learning
DLDeep LearningDNNDeep Neural Network
DQNDeep Q-NetworkeVTOLElectric Vertical Take-off and Landing
DRLDeep Reinforcement LearningFARFalse Alarm Rate
G2AGround-to-airG2UGround-to-UAV
GANGenerative Adversarial NetworkGNSSGlobal Navigation Satellite System
GPSGlobal Positioning SystemGSGround Station
IoTInternet of ThingsLAENetLow-Altitude Economy Networking
LSTMLong Short-Term MemoryLoSLine-of-sight
MARLMulti-agent Reinforcement LearningMDPMarkov Decision Process
MDRMiss Detection RateMECMobile Edge Computing
MLMachine LearningMSEMean Square Error
NOMANon-orthogonal Multiple AccessPLAPhysical-layer Authentication
PLSPhysical Layer SecurityPUFPhysical Unclonable Function
QoEQuality of ExperienceRFRadio Frequency
RISReconfigurable Intelligent SurfacesRLReinforcement Learning
RNNRecurrent Neural NetworkRSSReceived Signal Strength
SCASuccessive Convex ApproximationSDNRSignal-to-disturbance-plus-noise Ratio
SNRSignal-to-noise RatioSOCSecond-Order Cone
TDMATime-division Multiple AccessTHzTerahertz
U2GUAV-to-ground CommunicationUAVUnmanned Aerial Vehicle
" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.444, + 0.295, + 0.459 + ], + "angle": 0, + "content": "B. Contributions of Our Survey" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.469, + 0.492, + 0.71 + ], + "angle": 0, + "content": "The related surveys and tutorials primarily focus on the classification of overall security threats and corresponding countermeasures in UAV networks or UAV-assisted applications, with relatively little attention given to security issues of communication in the physical layer. Different from existing studies, our survey uniquely concentrates on the security challenges specific to physical layer communications in the LAENet, as summarized in Table II. It fills a critical gap in the literature by conducting an in-depth analysis of threats in physical layer communications that were previously underexplored or only briefly mentioned in prior studies. By offering a comprehensive and systematic analysis of these underexplored issues, our work brings new insights to seek effective solutions to enhance physical layer security in communications of the LAENet. The key contributions of this paper are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.712, + 0.491, + 0.742 + ], + "angle": 0, + "content": "The key contributions of this paper are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.749, + 0.49, + 0.884 + ], + "angle": 0, + "content": "- A thorough discussion of the six main security issues in the physical layer communication of the LAENet is presented, namely, eavesdropping attack, unauthorized access, jamming attack, spoofing attack, anomalous behaviors, and injection attack. We analyze these attacks in the context of their potential occurrence throughout the entire operation of LAENet, providing essential references for ensuring the security of physical layer communication in the future LAENet deployments." 
+ }, + { + "type": "text", + "bbox": [ + 0.092, + 0.885, + 0.492, + 0.947 + ], + "angle": 0, + "content": "- We review countermeasures against various attacks in detail and offer a comprehensive tutorial on achieving communication confidentiality, communication availability, and communication integrity in LAENet. In addition," + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.749, + 0.492, + 0.947 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.444, + 0.922, + 0.489 + ], + "angle": 0, + "content": "the lessons learned for each security issue are presented to emphasize the limitations of existing works and provide high-level insights for improvements." + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.49, + 0.923, + 0.625 + ], + "angle": 0, + "content": "- Several potential future research directions for secure physical layer communication in LAENet are proposed, including energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. These diverse perspectives offer new guidance for future research on secure physical layer communication in LAENet." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.626, + 0.923, + 0.793 + ], + "angle": 0, + "content": "The remainder of this paper is given as follows. Section II introduces the background of the LAENet and security issues in physical layer communication of the LAENet. In Section III, a comprehensive exploration of achieving communication confidentiality for the LAENet is presented. Section IV reviews the solutions for communication availability in the LAENet. In Section V, countermeasures on communication integrity for the LAENet are discussed. Section VI provides future research directions, and Section VII concludes this paper. 
Additionally, Table III lists the abbreviations commonly employed throughout this survey." + }, + { + "type": "title", + "bbox": [ + 0.599, + 0.804, + 0.828, + 0.817 + ], + "angle": 0, + "content": "II. BACKGROUND KNOWLEDGE" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.822, + 0.922, + 0.883 + ], + "angle": 0, + "content": "In this section, we introduce the background of the LAENet, including its definition and application scenarios. Subsequently, the concept of physical layer communication in the LAENet and its security threats are elaborated in detail." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.897, + 0.688, + 0.911 + ], + "angle": 0, + "content": "A. Background of LAENet" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.946 + ], + "angle": 0, + "content": "The LAENet is a sophisticated and dynamic system that integrates various aerial and terrestrial technologies to en" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.071, + 0.917, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.443, + 0.925, + 0.492 + ], + "angle": 0, + "content": "Fig. 2. Background knowledge of the LAENet and security issues in its physical layer communication. Describe the definition of the LAENet and its communication application scenarios. Elaborate on three key metrics for secure physical layer communication: communication confidentiality, which combats eavesdropping attacks and unauthorized access; anti-jamming strategies and spoofing defense for ensuring communication availability; and anomaly detection and injection defense to prevent adversaries from compromising communication integrity." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.516, + 0.492, + 0.698 + ], + "angle": 0, + "content": "able seamless communication, coordination, and management of diverse aerial operations within low-altitude airspace [1], [108]. The LAENet includes numerous different types of constituents, such as flight equipment, base stations, and other communication platforms. Specifically, the LAENet connects various aerial vehicles, including general aviation aircraft for passenger transport and emergency rescue, drones for surveillance and logistics, and eVTOL designed for urban air mobility and last-mile cargo delivery [109], [110]. These aerial vehicles can incorporate ground and aerial base stations, further high-altitude platforms, such as weather balloons and satellites, to receive environmental information and precise navigation [13]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.704, + 0.493, + 0.946 + ], + "angle": 0, + "content": "Different from traditional aviation networks that rely on centralized air traffic control, the LAENet can independently construct communication and networking by seamlessly interconnecting a variety of aerial and ground-based systems, which enables continuous information exchange, flight path optimization, and autonomous operations [8], [111]. Therefore, the LAENet has opened opportunities for various application scenarios and plays key roles from the perspective of communication coverage and relay [112]–[114]. Specifically, the LAENet can extend the communication coverage by deploying aircraft as ABSs in areas lacking communication infrastructure [115]–[117]. For instance, these ABSs deployed at optimal altitudes can provide connectivity and network services in remote or disaster-stricken areas [118], [119]. 
Moreover, if the direct communication links between ground base stations and user equipment are unreliable, such as in" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.516, + 0.921, + 0.621 + ], + "angle": 0, + "content": "mountainous regions and densely populated areas, the aircraft can act as mobile relays to improve connectivity by capturing, amplifying, and transmitting communication signals [120]–[122]. It also can be regarded as a surveillance unit to monitor airspace dynamics while simultaneously functioning as a low-altitude network orchestrator to optimize communication and computing resources [118], [123], [124]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.623, + 0.922, + 0.835 + ], + "angle": 0, + "content": "To integrate and evolve these capabilities, the LAENet needs to establish effective communication infrastructure to ensure reliable connectivity and efficient interaction across various environments [31], [125]. Physical layer communication, as the bottom layer in the network architecture, may directly influence the communication performance of the LAENet across aerial and terrestrial networks [43], [46]. For example, it governs how signals are generated, transmitted, and received between aircraft and base stations [31]. Building on this, it manages the channel and spectrum resources to enhance signal transmission quality and maintain stable connectivity [43]. Therefore, ensuring the security of physical layer communication in the LAENet is crucial for supporting a wide range of applications in low-altitude domains." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.863, + 0.924, + 0.891 + ], + "angle": 0, + "content": "B. 
Security Issues in Physical Layer Communication of LAENet" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Based on previous studies [126], [127], we discuss the security issues in the physical layer communication of the LAENet from three aspects: confidentiality, availability, and" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.04 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.07, + 0.49, + 0.098 + ], + "angle": 0, + "content": "integrity of communications. The details of each measurement are described as follows." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.13, + 0.49, + 0.325 + ], + "angle": 0, + "content": "- The confidentiality of physical layer communications in the LAENet can be compromised by security threats such as eavesdropping and unauthorized access [128]. Eavesdropping arises primarily from the broadcast nature of wireless signals and LoS link, making transmissions highly susceptible to interception [46]. An eavesdropper silently capturing or intercepting signals can lead to the exposure of confidential information. Meanwhile, unauthorized access threats exploit the open and broadcast nature of UAV communications [31]. Attackers may gain illegal access to the LAENet by disguising themselves as legitimate UAVs or ground stations, thereby deceiving or interfering with the normal operation of UAVs [51]." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.327, + 0.49, + 0.505 + ], + "angle": 0, + "content": "- Similarly, the open nature of wireless channels and LoS propagation bring jamming and spoofing security issues for communication availability [78]. Specifically, jammers can continuously transmit interference signals to disrupt communication, where a jammer can be a drone or a base station [48]. 
The spoofing attack can not only achieve identity spoofing by forging legitimate transmission identities but also launch signal deception attacks to disrupt UAV communications and positioning [49]. Therefore, jamming and spoofing lead to unauthorized access and signal disruptions or errors, making communication unavailable in the LAENet." + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.508, + 0.49, + 0.763 + ], + "angle": 0, + "content": "- Integrity as a microscopic metric measures the deviations of signals, channels, and spectrum in communication under adversaries' influence [129]. The communication integrity of the LAENet can be affected by anomalous behaviors and injection attacks. Anomalous behaviors often use dynamic spectrum access and the open wireless medium, including abnormal jamming, abnormal transmission power, and covert eavesdropping [95]. These anomalous behaviors can introduce harmful interference, violate spectrum policies, and expose sensitive information to eavesdroppers [61], [96]. Moreover, the injection attack exploits the open nature of wireless channels to alter signals or inject illegal signals, such as spoofing signals or malicious GNSS signals, to deceive receivers and interfere with communication, thereby leading to degraded signal quality, false navigation, and network congestion [28], [46], [97]." + }, + { + "type": "list", + "bbox": [ + 0.096, + 0.13, + 0.49, + 0.763 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.794, + 0.49, + 0.944 + ], + "angle": 0, + "content": "Overall, as illustrated in Fig. 2, this survey reviews existing research on achieving communication confidentiality, availability, and integrity for the LAENet. Specifically, the investigation of anti-jamming strategies and communication authentication schemes aims to enhance communication confidentiality. 
Studies on anti-jamming techniques and spoofing defense mechanisms have been explored to ensure communication availability. Furthermore, research on communication integrity has focused on anomaly detection and injection attack mitigation approaches." + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.07, + 0.91, + 0.083 + ], + "angle": 0, + "content": "III. COMMUNICATION CONFIDENTIALITY FOR LAENET" + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.093, + 0.719, + 0.107 + ], + "angle": 0, + "content": "A. Anti-eavesdropping Strategy" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.113, + 0.92, + 0.339 + ], + "angle": 0, + "content": "The LAENet faces significant eavesdropping threats due to the inherent vulnerabilities of UAV-enabled wireless communications. The openness of wireless channels, especially the LoS links in air-to-ground (A2G) and ground-to-air (G2A) communications, increases susceptibility to interception by eavesdroppers that disrupt legitimate communications compared to traditional terrestrial channels [50]. Traditional cryptographic methods, while effective in many scenarios, are less suitable for UAV communications due to their computational complexity and the dynamic mobility of UAVs [130]. This highlights the critical need for robust security measures to ensure the confidentiality and reliability of the LAENet communications. To address these limitations, leveraging PLS techniques to counter eavesdropping threats effectively has emerged as a promising solution [131]-[134]." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.34, + 0.92, + 0.566 + ], + "angle": 0, + "content": "In the LAENet, anti-eavesdropping solutions can leverage the controllable mobility of low-altitude aircraft to enhance physical layer security. By dynamically optimizing their trajectories, low-altitude aircraft can actively adapt their flight paths to shape the communication environment [135]. 
This approach allows them to fly closer to legitimate ground nodes, strengthening communication links and improving channel conditions for intended receivers, while simultaneously distancing themselves from potential eavesdroppers. In this subsection, we present a critical role of UAV trajectory in forming the communication environment, and how PLS can be enhanced through trajectory optimization and resource allocation to mitigate eavesdropping risks. Our analysis focuses on three prominent methodologies in this domain: convex optimization, deep learning, and reinforcement learning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.568, + 0.92, + 0.793 + ], + "angle": 0, + "content": "Convex optimization plays a crucial role in addressing anti-eavesdropping challenges in UAV-enabled communication networks, particularly for solving the joint optimization of trajectory and resource allocation [137]. Due to the inherent non-convex nature of these problems, advanced convex optimization techniques such as Successive Convex Approximation (SCA) and Block Coordinate Descent (BCD) are widely utilized [135]. These methods enable UAVs to enhance physical layer security by optimizing flight paths and resource utilization, minimizing the risk of eavesdropping while ensuring secure and efficient communication. Additionally, the decision variables may be discrete, which requires the application of various relaxation methods to transform the complex optimization problem into a more tractable form to obtain efficient solutions [138]." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.794, + 0.92, + 0.944 + ], + "angle": 0, + "content": "The study in [53] explores physical-layer security in UAV-assisted Mobile Edge Computing (MEC) systems in the presence of multiple ground-based eavesdroppers. The proposed system utilizes dual UAVs for task execution and anti-eavesdropping measures. 
One UAV operates as a mobile MEC server, while the other emits jamming signals to disrupt eavesdroppers, as shown in Fig. 3. The time-division multiple access (TDMA) scheme and non-orthogonal multiple access (NOMA) scheme are proposed to maximize the minimum secure computing capacity by jointly optimizing communica" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "7" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.915, + 0.107 + ], + "angle": 0, + "content": "TABLE IV SUMMARY OF CONVEX OPTIMIZATION FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.117, + 0.92, + 0.452 + ], + "angle": 0, + "content": "
RefOptimization ObjectivesEavesdropper and Jammer TypeOptimizationConstraintsPros & Cons
[53]Secure calculation capacity1UAV jammer and fixed ground eavesdropperTransmit power, time allocation, and computation capacityBCD and P-BCD for secure calculation capacity maximization\n✓ Secure capacity of NOMA and TDMA has been significantly improved\nX High complexity for NOMA due to dual-loop iterations
[54]Secure calculation capacityBase station jammer and fixed ground eavesdropperTransmission power, time allocation, and CPU processing frequencyJDPB algorithm with SCA and BCD for secure task offloading\n✓ Reduce complexity via region division\nX Fixed UAV altitude limits 3D trajectory optimization
[55]Average secrecy rate2Antenna jammer and fixed aerial eavesdropperTransmit power and jamming powerBCD and SCA optimization with hybrid FSO/RF links\n✓ Enhance communication security via hybrid FSO/RF links and AN\nX Rely on simplified channel models (e.g., free-space path loss)
[56]Worst-case secrecy rateUAV jammer and fixed ground eavesdropperUAV speed, collision avoidance, positioning error, and energy harvestingRobust 3D trajectory and time switching optimization\n✓ Full mobility of UAVs in 3D for improving secrecy rate\nX The performance may degrade with flying eavesdroppers
[57]Average secrecy rateNone and flying eavesdropperTransmit power control and user schedulingJoint trajectory and communication design against mobile eavesdroppers\n✓ Initial trajectory design for keeping away from eavesdroppers\nX Security performance relies on the initial trajectory design
[58]Secure calculation capacityGround jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceIntegrate a dual-UAV system with a ground jammer in MEC\n✓ Incorporate the UAV server and UAV eavesdropper with a ground jammer\n✓ Allow a UAV server to hover near ground users for secure offloading\nX Numerous flight constraints may require extensive tuning
[59]Secrecy rateCoastal jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceA secure communication for UAV-relay-assisted maritime MEC\n✓ Simultaneously optimize multiple parameters for improved secrecy rate\nX Iterative decomposition increases the computational burden\nX Assume prior knowledge of Channel State Information (CSI) of devices
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.079, + 0.452, + 0.69, + 0.463 + ], + "angle": 0, + "content": "1Secure calculation capacity is defined as the average number of secure calculation bits in UAV flying time [54]." + }, + { + "type": "table_footnote", + "bbox": [ + 0.078, + 0.462, + 0.847, + 0.475 + ], + "angle": 0, + "content": "2Secrecy rate is defined as the difference between the achievable rate of legitimate UAV's channel and the rate of eavesdropper channel [136]." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.452, + 0.847, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.501, + 0.492, + 0.879 + ], + "angle": 0, + "content": "tion resources, computation resources, and UAV trajectories. To address the non-convexity of the optimization problem, the problem is transformed into tractable forms via auxiliary variables and decomposition. Specifically, for the TDMA scheme, the problem is decoupled into two sub-problems using BCD. The communication and computation resources are optimized via second-order cone (SOC) constraints and SCA, while UAV trajectories are iteratively updated via first-order Taylor approximations to handle non-convex terms. For the NOMA scheme, a penalized BCD (P-BCD) algorithm is proposed to tackle binary constraints. The problem is split into three blocks that are penalty parameter adjustment, resource allocation via SOC and SCA, and trajectory optimization with convex relaxations. The experimental results demonstrate that the proposed algorithms significantly enhance secure computing capacity, with the NOMA scheme achieving up to about 4.3 Mbps and the TDMA scheme reaching about 4.2 Mbps under optimal conditions. 
Compared to baselines including the straight flight design and no power control, the proposed strategies improve secure computing capacity by about \\(20\\%\\) to \\(30\\%\\), particularly in scenarios with lower power budgets (e.g., 0.2 W) and higher required computing bits (e.g., 1 Mbps). The convergence of the algorithms is achieved within 20 iterations, which indicates the efficiency in optimizing UAV trajectories and resource allocation for anti-eavesdropping." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.884, + 0.492, + 0.947 + ], + "angle": 0, + "content": "The study in [53] mainly focuses on a dual-UAV-assisted secure MEC system. In some cases, multi-UAV systems hold great promise for collaboratively executing complex tasks while enhancing the secure communications [49], [54]. In the" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.501, + 0.923, + 0.923 + ], + "angle": 0, + "content": "work [54], the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems is studied. Firstly, a base station emits jamming signals to protect against fixed-location ground eavesdroppers. Then, it investigated the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems. The problem is decomposed into two sub-problems: (1) resource allocation and trajectory planning, addressed via SCA and BCD algorithms; (2) offloading decisions, solved through Joint Dynamic Programming and Bidding (JDPB) method. For the first sub-problem, non-convex constraints related to transmission power and UAV trajectory are transformed into convex forms using first-order Taylor expansion and relaxation techniques. Specifically, the transmission power optimization sub-problem is approximated via SCA, while the trajectory planning sub-problem is iteratively solved by introducing auxiliary variables and convex approximations. 
For the second sub-problem, a bidding mechanism is integrated with dynamic programming to reduce computational complexity by grouping dynamic users into sub-regions. The experimental results demonstrate that the proposed JDPB algorithm achieves a sum average secure calculation capacity of 10.1 Mbps in the first time slot. Additionally, under different settings of time slot sizes, transmission power, and flying speed, the sum average secure calculation capacity achieved by JDPB consistently outperforms baseline schemes such as the Greedy Strategy and the Random Strategy." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.929, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Unlike the above studies that deal with ground eavesdrop-" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "8" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.077, + 0.483, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.277, + 0.473, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.476, + 0.493, + 0.571 + ], + "angle": 0, + "content": "Fig. 3. The overall architecture of the anti-eavesdropping strategy. Part A illustrates the system model against fixed ground eavesdroppers. In this setup, one UAV operates as a mobile server, while another UAV serves as a jammer to emit jamming signals to disrupt the eavesdroppers' interception capabilities. Part B presents the system model for flying eavesdroppers, where one UAV acts as the server, and another UAV functions as a mobile eavesdropper. To mitigate eavesdropping risks, a ground-based jammer actively emits interference signals to secure communications." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.598, + 0.496, + 0.947 + ], + "angle": 0, + "content": "pers, the work in [55] targets threats from aerial eavesdroppers and explores secure communication in a hybrid Free Space Optical (FSO) and Radio Frequency (RF) system. The UAV acts as both a relay and a jammer, emitting artificial noise (AN) during RF transmission to confuse a fixed-position aerial eavesdropper. The work introduces a novel perspective on protecting space-air-ground networks from eavesdropping by leveraging FSO for its inherent resistance to interception and jointly optimizing trajectory design and power allocation to maximize the secrecy rate with two transmission schemes. The first scheme is the slot-based scheme for delay-sensitive data. The trajectory sub-problem is convexified using first-order Taylor expansion to approximate elevation angle and channel gain constraints, while the power allocation sub-problem is transformed into a convex form by introducing a lower bound on transmit power to ensure convexity. The second scheme is the period-based scheme for delay-insensitive data, in which the relaxed constraints on sum secrecy rates over the entire flight period are adopted. A similar SCA method [54] is applied to convexly approximate the non-convex terms in the constraints. Compared to benchmark schemes without jamming power optimization, both methods achieve approximately 0.4 Mbps higher secrecy rates by integrating AN transmission" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.07, + 0.685, + 0.083 + ], + "angle": 0, + "content": "and hybrid FSO/RF links." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.085, + 0.923, + 0.372 + ], + "angle": 0, + "content": "It is worth noting that most existing studies consider optimizing UAV trajectories on a 2D plane. However, optimizing UAV 3D trajectories may be more practical [139]. 
The study in [56] considers the UAV's 3D flight trajectory and imperfect knowledge of eavesdroppers' locations, while formulating an optimization approach to maximize the worst-case secrecy rate under various practical constraints, including maximum UAV speed, UAV collision avoidance, UAV positioning error, and UAV energy harvesting. To address the non-convexity of the optimization problem, the original problem is decomposed into multiple sub-problems using BCD and SCA techniques similar to studies in [54] and [55]. By incorporating the additional degree of freedom in the vertical dimension, the proposed approach improves the ability to avoid fixed eavesdropping zones, outperforming 2D trajectory models in maintaining secure communication links under dynamic conditions. Simulation results show that the average secrecy rate of the proposed 3D optimization scheme outperforms that of the fixed-height 2D benchmarks (set at \\(100\\mathrm{m}\\)) by over \\(20\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.372, + 0.923, + 0.537 + ], + "angle": 0, + "content": "Unlike the above studies that focus on fixed ground eavesdroppers, mobile eavesdroppers, such as hostile UAVs, introduce more complex threats due to their ability to maneuver, track, and position for intercept communications [22], [57]. For example, the authors in [57] address the challenges caused by a flying eavesdropper that exploits UAV LOS communication. This work focuses on jointly optimizing the UAV's trajectory, transmit power control, and user scheduling to maximize the minimum average secrecy rate, which enables dynamic adjustments to ensure secure communication even against an mobile eavesdropper." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.538, + 0.923, + 0.884 + ], + "angle": 0, + "content": "Compared to the anti-eavesdropping strategies in [57] that rely heavily on accurate trajectory optimization and resource allocation, the studies in [58], [59] propose using a jammer to actively emit jamming signals, effectively reducing the interception capability of flying eavesdroppers during the computational offloading process of relay UAVs, as shown in Fig. 3. Meanwhile, with the support of SCA and BCD methods similar to [56], the joint optimization problem of UAV trajectories, resource allocation (including transmit power, time slot allocation, and computation capacity), and jamming strategies can be solved while ensuring practical constraints such as flight speed and anti-collision requirements. Importantly, compared to systems targeting fixed ground eavesdroppers, the works in [58], [59] enhance secure calculation capacity or secrecy rate by modeling the trajectories of both the relay UAV and the mobile eavesdropper as dynamic variables optimized over discrete time slots. Specifically, simulation results in [58] demonstrate that the secure calculation capacity of the proposed scheme converges to approximately 2.78 Mbps within 4 iterations, which is significantly higher than the baseline strategy (where only the location of the relay UAV, transmit power, and jamming power are optimized) by approximately 1.6 Mbps." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.885, + 0.924, + 0.947 + ], + "angle": 0, + "content": "Lesson Learned. 
Convex optimization has emerged as a fundamental tool for developing anti-eavesdropping strategies in UAV-enabled communication systems, particularly for addressing the inherent non-convexity of joint trajectory and" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.073, + 0.256, + 0.084 + ], + "angle": 0, + "content": "Part A. DDQN-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.09, + 0.563, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.585, + 0.073, + 0.753, + 0.084 + ], + "angle": 0, + "content": "Part B. DDPG-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.594, + 0.093, + 0.904, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.24, + 0.299, + 0.251 + ], + "angle": 0, + "content": "Part D. MAPPO-LSTM-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.258, + 0.545, + 0.387 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.586, + 0.24, + 0.771, + 0.251 + ], + "angle": 0, + "content": "Part C. MADDPG-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.596, + 0.254, + 0.904, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.409, + 0.922, + 0.48 + ], + "angle": 0, + "content": "Fig. 4. The overall architecture of the RL for anti-eavesdropping. Part A describes the DDQN-based scheme, where the system state is used to generate actions through the DDQN network, followed by action execution and obtaining the next state and reward. An experience replay mechanism is employed to store and randomly sample training data. Part B presents the DDPG-based scheme, where actions are generated through Actor and Critic networks, interacting with the environment to obtain rewards. 
An experience replay buffer is used to store and sample mini-batches. Part C describes the MADDPG-based scheme, involving multiple UAV agents, each with its own Actor and Critic networks, interacting with the environment and sharing rewards. Part D showcases the MAPPO-LSTM-based scheme, where Actor and Critic networks with LSTM layers process time-series data and train through an experience replay buffer." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.505, + 0.492, + 0.776 + ], + "angle": 0, + "content": "resource allocation problems. For fixed eavesdroppers, simpler optimization models with fewer dynamic variables (e.g., 2D trajectory optimization) can achieve secure communication effectively. However, mobile eavesdroppers require more sophisticated formulations, including 3D trajectory optimization and robust constraints to account for uncertainties in eavesdropper positions. Another important insight is the adaptability of convex optimization when combined with complementary methods like artificial noise jamming and resource allocation strategies. By leveraging convex optimization, systems can balance secrecy performance with energy efficiency, ensuring practical applicability in real-world UAV operations. Techniques such as SCA and BCD have proven highly effective in decoupling complex optimization problems into solvable subproblems, allowing iterative refinement toward locally optimal solutions. Overall, convex optimization offers a flexible and mathematically rigorous approach to securing UAV-enabled communication systems for anti-eavesdropping." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.779, + 0.492, + 0.947 + ], + "angle": 0, + "content": "As the number of ground devices increases, along with UAV flight time and the number of optimization variables, the computational complexity of conventional algorithms grows exponentially, leading to infeasibility or suboptimal solutions [140], [141]. 
Moreover, these methods struggle to adapt to real-time scenarios where UAVs must communicate with mobile users and operate in environments with uncertain or partial information [140], [142]. RL enables UAVs to interact with the environment and autonomously learn optimal policies based on real-time observations [143], as shown in Fig. 4. By leveraging Deep RL (DRL), UAVs can efficiently" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.505, + 0.921, + 0.58 + ], + "angle": 0, + "content": "adapt to changing eavesdropping conditions, optimize secure trajectories, and dynamically allocate resources [144], [145]. This learning-driven approach significantly enhances PLS by ensuring adaptive, scalable, and intelligent anti-eavesdropping strategies in UAV communication networks." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.581, + 0.922, + 0.929 + ], + "angle": 0, + "content": "The study in [60] proposes a Deep Q-Network (DQN)-based approach to address the challenge of securing UAV-assisted multi-user wireless communications against passive eavesdropping attacks. The UAV trajectory optimization is formulated as a Markov Decision Process (MDP), where the state space includes the UAV's 3D coordinates and the positions of users. The action space consists of discrete movements in the \\(x\\), \\(y\\), and \\(z\\) directions, with each action representing a step change in position. The reward function is designed to maximize the legitimate users' rates, defined as the sum of the channel capacities of users served by the UAV. Unlike many prior works that assume perfect knowledge of eavesdropper CSI [53], [59], this study focuses on optimizing legitimate user rates and using the DQN-based approach without requiring full knowledge of the eavesdropping channels. The DQN iteratively optimizes the UAV's trajectory, beamforming matrix, and transmit power allocation, ensuring the UAV dynamically adjusts its position to maximize secrecy capacity. 
Numerical results show that the secrecy capacity improves with the number of users. The proposed method converges an order of magnitude faster than the Q-learning method and achieves around \\(35\\%\\) higher secrecy capacity than Q-learning after 20,000 episodes." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "However, the DQN method may face the issue of Q-" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "10" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.914, + 0.106 + ], + "angle": 0, + "content": "TABLE V SUMMARY OF RL FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.117, + 0.927, + 0.381 + ], + "angle": 0, + "content": "
TechniquesReferenceAlgorithmPros & Cons
Value-based RL[60]DQN○ DQN algorithm for UAV trajectory optimization to maximize the secrecy capacity\n✓ Low computational complexity, making it easy to train\n× Q-value overestimation, leading to suboptimal action selection
[61]DDQN○ DDQN-based joint trajectory, time allocation, and offloading optimization\n✓ Accelerated convergence via action space pruning\n✓ Real-time optimization of trajectory and resources\n× DDQN is restricted to discrete action spaces\n× DDQN is not suitable for continuous action control
Policy Gradient-based RL[62]CAA-MADDPG○ Multi-Agent DRL with attention mechanisms (CAA-MADDPG) for secrecy rate maximization\n✓ Handle complex multi-agent with the attention mechanism\n× Assume prior knowledge of eavesdropper locations\n× Assume ground devices are static
[63]DDPG○ DDPG-based RL for enhancing bi-directional UAV communication security\n✓ Support mobile devices and ensure bi-directional secureit\n× Computational overhead increases with device density\n× performance may be sensitive to hyperparameter selection
[64]PPO+DCCN○ Hybrid DCCN and PPO for secrecy rate maximization\n✓ The PPO optimizes the UAV trajectory based on the results from DCCN\n× The performance is sensitive to the choice of clipping factor in PPO
[65]MAPPO+LSTM○ MAPPO for multi-agent cooperative anti-eavesdropping and LSTM-enhanced sequential learning\n✓ The MAPPO+LSTM improves the learning capability of sequential sample data\n× Assume perfect knowledge of CSI may be challenging in real-world scenarios
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.409, + 0.493, + 0.816 + ], + "angle": 0, + "content": "value overestimation, leading to suboptimal action selection [146]. The authors in [61] propose a double-deep Q-learning (DDQN)-based scheme to jointly optimize the UAV trajectory, time allocation, and offloading decision strategy, aiming to maximize the average secure computing capacity for antieavesdropping in UAV-aided MEC. The system model involves one legitimate UAV server, one illegitimate UAV eavesdropper, one ground jammer, and ground users. The proposed DDQN-based scheme models the optimization problem as an MDP with states, actions, and rewards. The states include the coordinates of the UAVs, while the actions involve offloading decisions, time allocation, and trajectory adjustments. The reward function maximizes secure computing capacity. The DDQN model includes a deep Q-network (QN) and a deep target network (TN) to generate actions and evaluate their values. The parameters of the QN are updated by minimizing the loss function, and the parameters of the TN are periodically updated. The proposed scheme reduces the action space size by deleting illegal actions, such as those that violate time allocation constraints or result in resource waste. Unlike prior works [57], [60] that rely on conventional optimization or DQN with limited consideration of task queues, this approach integrates real-time resource allocation and trajectory optimization while ensuring dynamic constraints. The proposed DDQN scheme converges in 2000 episodes, half the episodes required by DQN (4000 episodes), and achieves a 0.02 Mbits higher average secure computing capacity." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.82, + 0.492, + 0.912 + ], + "angle": 0, + "content": "The value-based RL method (e.g., DQN) mainly focuses on dealing with discrete action spaces that may lead to the loss of optimal solutions [147]. 
The policy gradient-based RL method (e.g., Deep Deterministic Policy Gradient (DDPG)) can handle continuous action spaces [148], which are more suitable for UAV trajectory and transmit power optimization problems." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "The authors in [62] propose a multi-agent DRL framework to address the challenge of secure UAV communications in" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.408, + 0.923, + 0.635 + ], + "angle": 0, + "content": "the presence of eavesdroppers. The system model is similar to Part A of Fig. 3, where the UAV server sends confidential information to ground users, and UAV jammers send AN signals to ground eavesdroppers using 3D beamforming. This study designs the Multi-Agent Deep Deterministic Policy Gradient with a continuous action attention mechanism (CAA-MADDPG) to maximize the system's secrecy rate. The attention mechanism dynamically prioritizes relevant agents' observations (e.g., jammers focusing on eavesdroppers) to reduce the exploration space and accelerate convergence, thereby enhancing the system's ability to counteract eavesdropping attempts. The simulation results show that CAA-MADDPG achieves a secure rate of \\(4.5\\mathrm{bps / Hz}\\) and converges in 1000 episodes with three UAV jammers, outperforming MADDPG (around \\(4\\mathrm{bps / Hz}\\) and 1400 episodes) and DDPG schemes." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.643, + 0.923, + 0.946 + ], + "angle": 0, + "content": "However, the study in [62] just considers UAV-to-ground communication (U2G) and assumed the ground devices are static. The work in [63] addresses the challenge of securing bi-directional ground-UAV communications in a dynamic environment with mobile ground devices and eavesdroppers. Different from prior works that assume static ground eavesdroppers [54], [56], this study considers mobile ground eavesdroppers for more practical real-world scenarios. 
The communication in U2G and ground-to-UAV (G2U) cases is modeled, considering factors such as channel gains and distances. The problem of maximizing the worst-case average secrecy rate is formulated as a constrained MDP (CMDP) under the constraints of UAV flight space, flight speed, energy capacity, anti-collision, and peak transmit power. To solve the CMDP, the authors design a DDPG-based RL algorithm. The algorithm includes three key components: the primary network (actor and critic networks), the target network, and the replay buffer. They also adopt state normalization and exploration noise to speed up the training convergence of the DDPG. The proposed joint optimization scheme achieves a secrecy rate" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.032, + 0.92, + 0.04 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.07, + 0.49, + 0.129 + ], + "angle": 0, + "content": "over \\(40\\%\\) higher compared to baselines that optimize only trajectory or only power. In addition, DDPG outperforms DQN by approximately \\(15\\%\\) in secrecy rate due to its ability to handle continuous actions." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.13, + 0.49, + 0.205 + ], + "angle": 0, + "content": "The DDPGG methods form a fixed mapping from states to actions, which is not suitable for large state spaces that require more exploration and uncertainty [149]. The PPO alleviates this limitation by introducing proximal policy optimization, which allows for more exploration in the large action space." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.206, + 0.49, + 0.431 + ], + "angle": 0, + "content": "The study in [64] proposes a hybrid framework (Double Cascade Correlation Network (DCCN) + PPO) to maximize the secrecy capacity. DCCN bypasses the need for labeled training data by cascading two neural networks to maximize the secrecy channel rate. 
The PPO dynamically adjusts the UAV's position by using clipped surrogate objectives to stabilize policy updates and advantage estimation to prioritize high-reward actions. Simulation results show that the proposed scheme (DCCN + PPO) achieves an average secrecy rate of 0.73 bps/Hz, outperforming the benchmarks DCCN + DDPG (0.67 bps/Hz) and Random RIS + PPO (0.13 bps/Hz). However, the average secrecy continues to decline when the transmit power is higher than 2 W, since the jamming signals transmitted by the secondary source against the eavesdropper will also affect the primary users." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.433, + 0.49, + 0.823 + ], + "angle": 0, + "content": "The study in [64] considered only the use of one UAV to assist in secure communication. However, in low-altitude economic networks, it may be more important for multiple UAVs to collaborate to implement anti-eavesdropping strategies. The study in [65] considers a system model treats all secondary transmitters and multiple UAV jammers as multi-agents. A Multi-Agent PPO algorithm combined with Long Short-Term Memory (LSTM) networks, named MAPPO-LSTM, is proposed to maximize the secure communication rate by jointly optimizing the UAV trajectory, transmission power, and energy harvesting coefficients. The problem is formulated as a nonconvex MDP consisting of an action space, state space, observation, and reward (which consists of the sum of the secure communication rate, SINR information, and battery capacity). The MAPPO algorithm introduces counterfactual baselines to address the credit assignment problem in centralized learning and combines with the LSTM network to enhance the learning capability of sequential sample data. Compared to benchmark schemes MAPPO and MADDPG, the proposed MAPPO-LSTM method achieved around \\(17\\% - 20\\%\\) higher average secrecy rate in large-scale scenarios, with convergence speeds 1.37 times and 1.93 times faster, respectively. 
In addition, the reward is sensitive to the discount factor, where setting factor to 0.99 enables faster and more stable convergence. Deviations from this value result in more pronounced fluctuations in the reward and secrecy rate." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.825, + 0.49, + 0.944 + ], + "angle": 0, + "content": "Lesson Learned. The RL has emerged as a powerful yet challenging tool for anti-eavesdropping in UAV-assisted secure communications. A key lesson is that multi-agent cooperation significantly enhances security compared to single-agent approaches, enabling adaptive trajectory control, power allocation, and jamming coordination in dynamic environments. However, scalability and convergence efficiency remain critical bottlenecks, especially in high-dimensional, time-" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.07, + 0.919, + 0.281 + ], + "angle": 0, + "content": "varying settings, as many studies unrealistically assume perfect channel information, and deep RL's convergence time leaves the system vulnerable before optimization completes. Furthermore, key limitations demand further attention, such as RL's computational complexity, which restricts its use in resource-limited settings requiring real-time security, and its sensitivity to hyperparameter tuning that requires meticulous configuration to ensure optimal performance. Future advancements should focus on developing generalizable and robust learning architectures that can dynamically adapt to evolving threats while maintaining computational feasibility, addressing practical deployment challenges, exploring hybrid approaches, prioritizing security in system design, and balancing security performance with energy consumption." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.282, + 0.919, + 0.431 + ], + "angle": 0, + "content": "Deep learning, with its strong learning capabilities, parallel processing, and comprehensive reasoning [150]–[152], has huge potential to enhance anti-eavesdropping strategies in UAV communications, especially in environments with rapidly changing conditions and complex interactions [153]. Given the intricate problem of UAV trajectory variation and its nonlinear characteristics in time and space [154], [155], deep learning networks, such as neural networks and generative models, are emerging as potential solutions to improve the security and performance of UAV communication systems." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.433, + 0.919, + 0.717 + ], + "angle": 0, + "content": "The study in [66] explores the use of deep learning to optimize UAV deployment and jamming strategies against eavesdroppers to maximize the secrecy rate in the complete CSI scenario. The optimization problem is decomposed into two layers: the inner layer optimizes jamming power for a fixed UAV location, and the outer layer optimizes UAV deployment. The inner problem is solved using a bi-section search algorithm, while the outer problem is addressed using a deep neural network (DNN) to approximate the optimal UAV deployment. The DNN is designed as a fully connected structure, which includes an input layer, two hidden layers, and an output layer, as shown in part A of Fig. 5. The DNN is trained using a dataset generated by simulating different UAV deployments and corresponding secrecy rates. The final optimal deployment of the UAV can be approximated when the mean square error of weights between neurons is minimized. The DNN model achieves an average distance error of 2.2 meters compared to the optimal deployment found by the exhaustive search baseline." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.719, + 0.919, + 0.944 + ], + "angle": 0, + "content": "The fully connected neural network used in [66] is suited for problems where inputs and outputs are fixed-dimensional vectors without inherent spatial or sequential relationships [150]. Moreover, convolutional neural networks (CNNs) and recurrent neural networks (RNNs) can also contribute to antieavesdropping. In contrast to fully connected networks, CNNs are particularly effective for exploring spatial features from images or spatial maps [156]. RNNs, on the other hand, focus on handling sequential data by maintaining a memory of previous inputs through recurrent connections [150]. The authors in [67] propose a CNN-LSTM-based secure efficiency map (SEM) framework, which is constructed by calculating each subarea's security-efficiency index using a weighted exponential coefficient to combine normalized secure spectrum efficiency (secrecy rate per unit bandwidth) and secure energy" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "12" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.915, + 0.106 + ], + "angle": 0, + "content": "TABLE VI SUMMARY OF DEEP LEARNING FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.105, + 0.117, + 0.894, + 0.382 + ], + "angle": 0, + "content": "
TechniquesReferenceAlgorithmPros & Cons
Neural network model[66]DNN● Use DNN to optimize UAV deployment and jamming strategies for secure communication\n● The DNN model reduces the complexity of exhaustive searches\n● Rely on complete statistical channel knowledge\n● require intensive resources to generate a large amount of training data
[67]CNN-LSTM● CNN-LSTM-based SEM prediction for dynamic secure UAV trajectory optimization\n● Efficient spatiotemporal feature extraction via CNN-LSTM\n● Assume fixed UAV height and passive eavesdropper\n● Training CNN-LSTM network requires a substantial amount of data
[68]FL-DNN● FL-DNN-RL integration (FairLearn) for maximizing fairness in secrecy rates\n● Collaborative learning via FL improves generalization in anti-eavesdropping strategies\n● Involving multiple learning mechanisms requires significant computational resources\n● Assuming perfect CSI and eavesdropper localization may be impractical
Generative AI model[69]MD-GAN● MD-GAN with unknown CSI as model weights\n● Adapt to dynamic environments via gradient-based training\n● Do not require knowledge of the eavesdropper's detection threshold\n● Training a GAN can be computationally intensive
[70]DD-GAN● DD-GAN uses genetic algorithm-generated datasets for GAN training\n● Achieve an effective trade-off between covert rate and detection probability\n● Training relies on the quality and quantity of the genetic algorithm-generated data
[71]GDMTD3● GDMTD3 integrates generative diffusion models into TD3\n● Handle high-dimensional action spaces to adapt mobile eavesdroppers\n● Computational complexity from diffusion denoising steps
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.408, + 0.492, + 0.558 + ], + "angle": 0, + "content": "efficiency (secrecy rate per unit power). Historical SEMs are fed into a CNN-LSTM network to predict future SEMs by leveraging spatial-temporal feature extraction and time-series correlation. Based on predicted SEMs, a trajectory planning algorithm dynamically guides the UAV to subareas with the highest security-efficiency indices. The proposed SEM-enabled trajectory planning achieves an average security-efficiency index of 0.81, outperforming baseline schemes (e.g., static trajectory [142] or non-predictive methods [62], [157]) by over \\(30\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.563, + 0.493, + 0.896 + ], + "angle": 0, + "content": "Previous deep learning-based architectures [66], [67] are centralized, lacking collaboration and knowledge sharing among UAVs, while also facing challenges in privacy preservation and scalability. To address these limitations and optimize secrecy rate maximization under constraints such as UAV mobility, power budgets, and scheduling fairness, the authors in [68] propose a federated learning (FL)-based framework (FairLearn). As shown in part B of Fig. 5, the FairLearn employs three learning modules: (1) Module-D uses RL to dynamically generate training datasets by exploring UAV trajectories, power allocation, and scheduling policies; (2) Module-P employs a DNN trained on these datasets to predict optimal 3D trajectory, transmit power, and user scheduling, maximizing proportional fairness in secrecy rates (defined as the difference between legitimate UAV-user rates and eavesdropper rates); (3) Module-C applies FL to aggregate DNN models across UAVs, enabling collaborative learning while preserving data privacy. Simulation results show that FairLearn's secrecy rate is \\(26.6\\%\\) higher than BCD at 1.4W transmit power. 
After 100s of execution, FairLearn achieves \\(14.34\\%\\), \\(24.56\\%\\), and \\(108\\%\\) higher secrecy rates than BCD, MAQ, and QCQP baselines, respectively." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.899, + 0.492, + 0.947 + ], + "angle": 0, + "content": "It is worth noting that UAVs can only obtain limited prior environmental information without knowing perfect channel information and the eavesdropper's detection threshold or ex" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.408, + 0.923, + 0.558 + ], + "angle": 0, + "content": "act location. Some previous methods [59], [65], [68] may find it difficult to solve the optimization problem in such scenarios. In contrast, the generative adversarial network (GAN) has emerged as a new model for solving optimization problems with limited prior information [158], [159]. GAN can effectively model and approximate unknown distributions (such as channel coefficients, detection thresholds, and environmental parameters) through adversarial learning, where the generator continuously improves its strategy by learning from the feedback from the discriminator [158]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.568, + 0.923, + 0.947 + ], + "angle": 0, + "content": "The work in [69] addresses challenges related to partial channel distribution information and unknown eavesdropper detection thresholds by proposing a model-driven GAN (MDGAN) framework. The unknown channel coefficients and detection thresholds are treated as trainable weights in the MD-GAN. The MD-GAN transforms the joint trajectory and power optimization problem into a dynamic game between a generator (UAV) and a discriminator (eavesdropper), where the UAV acts as a jammer to protect secondary users from eavesdroppers. The generator optimizes the UAV's 3D trajectory and jamming power, while the discriminator evaluates detection errors. 
Then, a GAN-based joint trajectory and power optimization (GAN-JTP) algorithm is developed to achieve Nash equilibrium (i.e., maximizing the covert rate and the probability of detection errors). As shown in part C of Fig. 5, the GAN-JTP algorithm consists of two stages: network learning and network training. In the network learning stage, the generator optimizes the UAV's trajectory and transmit power based on the current state and environment. In the network training stage, the generator and discriminator are alternately trained using gradient backpropagation to update their weights. Simulation results show that increasing the training of the discriminator accelerates the convergence of the generator (e.g., when the training step is 10, convergence is achieved within 30 iterations, compared to 89 iterations when" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.92, + 0.041 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.075, + 0.249, + 0.086 + ], + "angle": 0, + "content": "Part A. DNN-based Architecture" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.088, + 0.408, + 0.175 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.419, + 0.075, + 0.598, + 0.086 + ], + "angle": 0, + "content": "Part B. FL-DNN-based Architecture" + }, + { + "type": "image", + "bbox": [ + 0.419, + 0.088, + 0.911, + 0.174 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.184, + 0.27, + 0.195 + ], + "angle": 0, + "content": "Part C. MD-GAN-based Architecture" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.197, + 0.408, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.423, + 0.184, + 0.606, + 0.195 + ], + "angle": 0, + "content": "Part D. 
DD-GAN-based Architecture" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.197, + 0.911, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.419, + 0.297, + 0.603, + 0.308 + ], + "angle": 0, + "content": "Part E. GDMTD3-based Architecture" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.31, + 0.911, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.416, + 0.922, + 0.498 + ], + "angle": 0, + "content": "Fig. 5. The overall architecture illustrates various deep learning-based architectures designed to enhance anti-eavesdropping capabilities in UAV deployment scenarios. Part A presents a DNN-based architecture that processes air-ground and ground-ground channel states to determine UAV deployment. Part B shows an FL-DNN-based architecture, incorporating modules for reinforcement learning, DNN-based feature mapping, and FL. Part C depicts an MD-GAN-based architecture, where a generator produces trajectories and power outputs based on location and environment status, while a discriminator evaluates the decisions. Part D introduces a DD-GAN-based architecture, focusing on generating jamming solutions to maximize covert rates, with a discriminator distinguishing between jamming and non-jamming solutions. Part E illustrates a GDMTD3-based architecture, utilizing an experience replay buffer and diffusion reverse process to optimize UAV deployment strategies." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.523, + 0.492, + 0.582 + ], + "angle": 0, + "content": "the training step is 1). For a flight period of 100 seconds, the GAN-JTP algorithm achieves a \\(0.47\\%\\) increase in covert rate with a \\(0.15\\%\\) reduction in detection error probability compared to the BCD-based scheme [160]." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.583, + 0.493, + 0.914 + ], + "angle": 0, + "content": "The MD-GAN [69] relies on model-driven methods where the unknown channel information and detection threshold are treated as trained weights. Differently, the authors in [70] propose a data-driven GAN (DD-GAN) framework that focuses on generating data consisting of environmental parameters and optimal solutions to train the GAN. Specifically, the DD-GAN transforms the optimization process into an interactive game between the UAV and eavesdropper, where the UAV aims to maximize the covert rate, and the eavesdropper aims to detect the presence of covert communication. To address the non-convexity and lack of eavesdropper detection threshold information in the optimization process, the DD-GAN trains a generator (UAV) and discriminator (eavesdropper) adversarially, using genetic algorithm-generated samples as prior data, as shown in part D of Fig. 5. The generator produces power and trajectory solutions, while the discriminator evaluates the detectability. The loss function of the discriminator is designed to maximize the probability of correctly identifying real data and minimize the probability of being fooled by generated data. The generator's loss function aims to maximize the probability that the generated data is mistaken for real data by the discriminator." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Besides GANs [69], [70], another generative model, the diffusion model, has advanced the effective representation" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.523, + 0.923, + 0.932 + ], + "angle": 0, + "content": "of multi-dimensional data distributions [161]. The diffusion model can better capture the complex dynamics and the tradeoff in the multi-objective optimization problem concerning secure communication [112]. 
For example, The diffusion model captures complex state-action distributions, enabling adaptive beamforming and UAV repositioning under eavesdropper mobility. To tackle dynamic environments and high-dimensional action spaces in secure communication and energy efficiency multi-objective optimization problem, the authors in [71] propose GDMTD3, a Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm enhanced with generative diffusion models. Key innovations include integrating diffusion-based reverse processes into the actor network for robust policy generation and addressing continuous action spaces, as shown in part E in Fig. 5. The training process of GDMTD3 involves initializing the online critic and actor networks, interacting with the environment, and updating the network parameters based on the collected experiences. The actor network uses a generative diffusion model to sample actions, while the critic networks evaluate the actions using twin critic networks to reduce overestimation bias. Simulation results show that GDMTD3 outperforms DRL-based benchmarks (including PPO, TD3, and DDPG), achieving about \\(50\\%\\) higher cumulative rewards and around \\(21\\%\\) higher average secrecy rate than TD3. In addition, when the number of UAVs increases from 4 to 8, the average secrecy rate increases accordingly. However, increasing the number of UAVs from 8 to 16 raises" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "14" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.915, + 0.107 + ], + "angle": 0, + "content": "TABLE VII SUMMARY OF AUTHENTICATION FOR COMMUNICATION CONFIDENTIALITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.117, + 0.915, + 0.348 + ], + "angle": 0, + "content": "
TechniquesReferenceAlgorithmPros & Cons
PUFs-based authentication[72]PUF\\(s^1\\)PUF-based dynamic session key generation and mutual authentication protocol\n✓ Lightweight design with no stored secrets\n✗ Potential overhead during temporary identity updates
[73]PUF-fuzzy extractorPUF-fuzzy extractor mutual authentication with TS-based dynamic session adaptation\n✓ Dynamic session time adaptation minimizes idle periods and enhancing security\n✗ Higher computational cost due to fuzzy extractors
[74]PUFs-fuzzy extractor-AEGISIntegration of PUFs, fuzzy extractors, and AEGIS for mutual authentication\n✓ The proposed password/biometric update mechanism reduces server dependency\n✗ Multiple cryptographic operations and protocols may be cause delay in the implementation
Channel based authentication[75]Rician channelPhysical-layer fingerprinting authentication based on Rician channel characteristics\n✓ Optimal power allocation balances data, AN, and authentication tag transmission\n✗ Reliance on encrypted tags requires high demand on UAV processing capabilities
[76]Rayleigh channelSNR difference-based PLA scheme\n✓ The SNR-based design can be implemented without additional hardware infrastructure\n✗ The simplified Rayleigh channel model may limit to real-world propagation environments
[77]Rayleigh/Rician channelsAD metric2for authentication under Rayleigh/Rician channels\n✓ AD metric-based method improves the detection accuracy of authentication\n✓ Detailed analysis of authentication performance under different propagation conditions\n✗ Computational complexity in Rician channels due to hypergeometric functions
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.077, + 0.348, + 0.905, + 0.369 + ], + "angle": 0, + "content": "Physical Unclonable Functions (PUFs) are hardware-based security primitives that exploit inherent and unique physical variations in devices to generate unclonable and unpredictable responses for communication authentication." + }, + { + "type": "table_footnote", + "bbox": [ + 0.077, + 0.369, + 0.849, + 0.381 + ], + "angle": 0, + "content": "2Authentication Distance (AD) is a metric proposed in [77] to distinguish legitimate and illegitimate signals for communication authentication." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.348, + 0.905, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.408, + 0.492, + 0.438 + ], + "angle": 0, + "content": "energy consumption but only marginally improves secrecy rates, highlighting a performance-energy trade-off." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.438, + 0.491, + 0.589 + ], + "angle": 0, + "content": "Lesson Learned A key lesson learned is that deep learning, particularly through advanced architectures such as GANs [69], [70] and diffusion models [71], can address complex, dynamic environments with partial channel state information and unknown eavesdropper locations, while demonstrating superior performance over traditional methods [59], [65], [66]. These approaches demonstrate that deep learning not only strengthens the resilience of secure communications but also enables autonomous, real-time decision-making to counteract evolving eavesdropping threats in UAV networks." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.609, + 0.31, + 0.623 + ], + "angle": 0, + "content": "B. 
Communication Authentication" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.628, + 0.491, + 0.822 + ], + "angle": 0, + "content": "In the LAENet, as UAVs operate in open environments and rely on wireless communication, they are highly vulnerable to security threats such as node capture and man-in-the-middle attacks [46]. Ensuring secure and reliable authentication between UAVs and ground stations/users or among UAVs is critical to preventing unauthorized access [52], [162]. Traditional cryptographic authentication schemes often impose significant computational and memory overheads and incur considerable lantency, making them unsuitable for resource-constrained UAVs [163]. Recently, advancements such as PUFs and Physical-layer Authentication (PLA) mechanisms have opened new possibilities for lightweight and effective authentication in the LAENet." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.824, + 0.492, + 0.947 + ], + "angle": 0, + "content": "PUFs are a class of hardware security primitives that leverage the inherent manufacturing variations (such as variations in circuit delay or RF properties) in semiconductor devices to generate unique and unpredictable responses [164]. When a specific input is applied to a PUF, the device generates a corresponding response, forming a challenge-response pair that is unique to this device [164]. Such uniqueness and unpredictability make PUFs highly resistant to cloning and" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.408, + 0.921, + 0.483 + ], + "angle": 0, + "content": "tampering, making them as a secure means for device authentication and key generation [165]. In addition, employing a PUF in a UAV allows for secure authentication without the need for complex cryptographic operations, making it an efficient solution for resource-constrained scenarios [166]." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.488, + 0.92, + 0.699 + ], + "angle": 0, + "content": "The work in [72] proposes a lightweight mutual authentication protocol, named SecAuthUAV, for securing UAV-ground station and UAV-UAV communications. SecAuthUAV employs PUFs in each UAV to generate a unique, unclonable session key that functions as a non-reproducible fingerprint. The protocol consists of three phases, as shown in Fig. 6. During UAV registration, a challenge-response pair from the UAV's PUF is stored, and a temporary identity is generated. In the UAV-ground station authentication phase, the UAV and ground station authenticate each other using challenge-response pairs and nonces, establish a session key, and update their identities. Lastly, in the UAV-UAV authentication phase, the GS facilitates secure communication by authenticating a second UAV and generates a session key for both UAVs." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.703, + 0.921, + 0.945 + ], + "angle": 0, + "content": "However, the work in [72] ignores the fact that the noise in PUFs can result in significant deviation in the output for the same input at different time points. In addition, [72] does not adjust the session time after establishing an authenticated session between two parties, which may lead to the active session remaining idle for a long time and thus give an opportunity for an adversary to interfere with the communication link. In light of this, the authors in [73] propose an UAV Authentication with Adaptive Session (UAAS) framework to address these challenges. Firstly, they combine PUFs and fuzzy extractors to address PUF noise. The fuzzy extractors consist of two phases: the \\( Gen(.) \\) phase creates a key and non-sensitive helper data, and the \\( Rep(.) \\) phase reconstructs the key from a noisy PUF response using the helper data while tolerating minor deviations. 
Then, the Thompson Sampling (TS)-based scheme is proposed to dynamically adapt the session time." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.92, + 0.041 + ], + "angle": 0, + "content": "15" + }, + { + "type": "image", + "bbox": [ + 0.081, + 0.068, + 0.918, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.32, + 0.925, + 0.381 + ], + "angle": 0, + "content": "Fig. 6. The overall architecture of the PUF-based authentication schemes for UAV-GS and UAV-UAV communication in [72]. Part A illustrates the PUF-based authentication process between a UAV and a ground station (GS). The UAV sends its ID and a nonce to the GS, which responds with a hash value based on the PUF, UAV ID, and nonce. The UAV then sends a value derived from the PUF and another nonce, and the GS verifies the authentication by comparing hash values. Part B shows the PUF-based authentication between two UAVs (U1 and U2) through the GS. After establishing a session key with the GS, U1 requests a connection to U2. The GS facilitates the authentication by generating a new session key, which is securely shared between U1 and U2." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.404, + 0.492, + 0.526 + ], + "angle": 0, + "content": "TS is a probabilistic approach that balances exploration and exploitation, determining the session time based on the fraction of busy time to minimize idle periods and reduce the risk of adversarial interference. Although the security analysis demonstrates that UAAS improves the security level in the mutual authentication mechanism, its throughput is \\(20.38\\%\\) lower and computational cost is 126 ms higher than the baseline [72] due to security overhead." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.531, + 0.493, + 0.804 + ], + "angle": 0, + "content": "In the LAENet, while establishing mutual authentication between UAVs and ground stations is critical, it is also important to incorporate role-specific access controls for users to ensure communication confidentiality and preventing unauthorized access [167]. The work in [74] proposes an authentication framework PUF-enabled authentication framework for Internet of Drones (PAF-IoD) to establish mutual authentication among users, UAVs, and ground stations. Users need to authenticate with stations to access the stored data or communicate directly with UAVs, where the users' authentication mechanism includes three factors (identity, password, and biometric data). Similar to [73], PAF-IoD uses PUFs and a fuzzy extractor in the authentication process to generate a unique and tamper-proof session key while tolerating the noise in PUFs. Furthermore, the designed authenticated encryption with associative data (AEAD)-based encryption algorithm is utilized for encrypting and decrypting messages exchanged between the user, ground station server, and UAVs." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.809, + 0.493, + 0.947 + ], + "angle": 0, + "content": "In addition to leveraging intrinsic physical properties of hardware for authentication design through PUFs [72]–[74], the characteristics of communication channels can be used for authentication. The PLA mechanism authenticates devices by exploiting the unique physical characteristics of wireless communication channels, such as CSI, received signal strength (RSS), and signal-to-noise ratio (SNR) [168]. The main reason is that the wireless channel between two communicating entities exhibits location-specific and time-varying properties" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.404, + 0.923, + 0.525 + ], + "angle": 0, + "content": "due to multipath propagation, fading, and environmental factors [169]. 
These diverse physical channel conditions, which provide a robust set of features for authentication, have been investigated in terrestrial communication networks [168]–[170]. Furthermore, the source of received signals can be accurately and promptly detected [170], making PLA particularly advantageous in the dynamic and complex communication environments of the LAENet." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.531, + 0.923, + 0.744 + ], + "angle": 0, + "content": "The authors in [75] leverage the unique properties of the physical layer channel, Rician channel, to develop a PLA for UAV-ground station communication. Given that UAVs receive signals subject to the Rician fading model, the ground station integrates authentication directly into the transmission process. It employs a one-way collision-resistant function (e.g. cryptographic hash function) that combines data symbols with a shared secret key to generate a low-power authentication tag for UAV and seamlessly embeds it into the transmitted signal. The authentication tag is validated by the correlation shaped by the Rician statistical characteristics of the fading channel, i.e., the correlation between the estimated tag (derived from the received signal) and the expected tag (generated using the secret key and decoded data symbols)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.749, + 0.923, + 0.947 + ], + "angle": 0, + "content": "However, the work in [75] still partially relies on cryptographic tag generation for authentication, which may not be suitable for UAVs with limited processing capabilities. The study in [76] leverages channel characteristics and geographical locations for PLA design, where the SNR differences between consecutive transmissions are utilized as the authentication metric. Specifically, a legitimate transmitter and a jammer have distinct channel variations due to differences in their geographical locations. 
The UAV authenticates the legitimate transmitter or jammer by formulating a binary hypothesis test based on the SNR difference between two successive transmissions. If the difference falls within a predefined threshold, the transmission is authenticated as from the legitimate" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "16" + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.079, + 0.419, + 0.093 + ], + "angle": 0, + "content": "Part A. Channel-based authentication in urban" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.101, + 0.445, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.333, + 0.44, + 0.346 + ], + "angle": 0, + "content": "Part B. Channel-based authentication in suburban" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.354, + 0.456, + 0.56 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.578, + 0.493, + 0.673 + ], + "angle": 0, + "content": "Fig. 7. The overall architecture of the channel-based authentication in urban and suburban environments in [77]. Part A depicts the authentication process in an urban environment under Rayleigh channel conditions. The UAV receiver calculates the SNR, computes the AD, and compares it with a detection threshold to distinguish between legitimate and malicious sensors. Part B illustrates the authentication process in a suburban environment, where the UAV receiver performs similar steps to authenticate legitimate sensors and detect malicious ones under Rayleigh channel conditions." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.701, + 0.491, + 0.822 + ], + "angle": 0, + "content": "transmitter; otherwise, it is classified as a jammer. 
The closed-form expressions for the probability density function of SNR differences, false alarm rate (FAR), and miss detection rate (MDR) are derived under Rayleigh fading channels in single-UAV and dual-UAV scenarios. The non-convex optimization problem of minimizing MDR under FAR constraints is solved using an SCA algorithm, which outperforms the RSS-based baseline [90] by about \\(40\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.824, + 0.492, + 0.946 + ], + "angle": 0, + "content": "It is worth noting that study [75] may lack a comprehensive analysis of the UAV-PLA performance under different propagation conditions. Additionally, the detection performance may be further improved with other indicators. As shown in Fig. 7, the work in [77] proposes a novel PLA framework under different propagation conditions, including dense urban and suburban environments modeled by Rayleigh and Rician channels, respectively. A new metric, Authentication Distance" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.923, + 0.265 + ], + "angle": 0, + "content": "(AD), is proposed as the normalized difference in received SNR between adjacent transmissions. For Rayleigh channels, closed-form expressions for FAR and MDR are derived using convolution and integration-by-parts, while Rician channels employ doubly non-central \\( F \\) distributions to model AD statistics. Similar to study [76], this authentication framework minimizes MDR under FAR constraints. In dense urban settings, MDR depends on path loss and transmitter-UAV geometry. For suburban environments, it incorporates elevation angle-dependent Rician factors and path loss exponents to improve discriminability between legitimate and illegitimate signals. The proposed AD-based method outperforms the SNR-difference baseline [171], achieving 40–60% lower MDR." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.266, + 0.923, + 0.432 + ], + "angle": 0, + "content": "Lesson Learned. 
Leveraging physical-layer characteristics, such as PUFs and channel properties, in conjunction with communication models and optimization algorithms, has proven effective in enhancing authentication accuracy and reducing detection errors. However, some methods also reveal limitations. For instance, the assumptions of ideal channel conditions and the neglect of practical implementation constraints may limit the applicability of the proposed solutions [76], [77]. Future research should focus on addressing these limitations by exploring more practical channel models and considering the trade-offs between security and system complexity." + }, + { + "type": "title", + "bbox": [ + 0.53, + 0.452, + 0.896, + 0.466 + ], + "angle": 0, + "content": "IV. COMMUNICATION AVAILABILITY FOR LAENET" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.472, + 0.686, + 0.487 + ], + "angle": 0, + "content": "A. Anti-Jamming Strategy" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.492, + 0.923, + 0.686 + ], + "angle": 0, + "content": "Jamming attacks pose significant challenges to communication availability in the LAENet by disrupting legitimate communication links and degrading the performance of aircraft communication networks [79], [172]. As shown in Fig. 10, these attacks can exploit the openness and broadcasting nature of UAV networks, making them particularly vulnerable to interference [79]. Malicious jammers can transmit strong signals that weaken signal strength, degrade signal quality, and increase communication delays, leading to unreliable coverage and potential paralysis of the entire network [172], [173]. This vulnerability underscores the urgent need for effective anti-jamming technologies to ensure reliable communication in the LAENet." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.688, + 0.923, + 0.869 + ], + "angle": 0, + "content": "Various anti-jamming strategies have been explored to safeguard the LAENet against malicious jamming, mainly focusing on trajectory adjustment, as well as channel and power control. Overall, by adjusting the trajectory in the spatial domain, an UAV can evade jamming signals while maintaining reliable communication with legitimate devices [80], [173]. Besides the spatial-domain anti-jamming strategy, the UAV can implement a frequency-domain-based anti-jamming scheme. The UAV can select legitimate channels while avoiding jamming signals and control transmit power to minimize energy consumption and latency under jamming attacks [83], [84]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.87, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Convex optimization methods can be used to adjust the UAV's trajectory to achieve anti-jamming by strategically guiding its movement to reduce interference and enhance communication reliability [80], [173]. It provides a systematic and efficient approach to handle the complex, non-convex" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "17" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.071, + 0.914, + 0.106 + ], + "angle": 0, + "content": "TABLE VIII SUMMARY OF ANTI-JAMMING STRATEGY FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.089, + 0.117, + 0.91, + 0.336 + ], + "angle": 0, + "content": "
TechniquesReferenceAlgorithmPros & Cons
Convex optimization[80]BCD, SCABCD and SCA for UAV 3D trajectory optimization for anti-jamming\n✓ Probabilistic LoS performs well in real-world scenarios such as urban environments\nX High computational complexity may be challenging in resource-constrained environments
[81]SCA, DinkelbachSCA and Dinkelbach algorithm for energy-efficient trajectory optimization under malicious jammers\n✓ Balance between throughput and energy consumption in anti-jamming\nX Assume static and LoS-dominated channels
[82]BCD, SCABCD and SCA for joint UAV trajectory and transmit power optimization under jamming\n✓ Improve throughput by considering transmit power optimization against jammers\nX Assume a fixed UAV altitude and a static channel environment
Multi-agent RL[87]MALQLCollaborative MALQL algorithm for anti-jamming with channel and power allocation\n✓ Accelerate convergence compared to single-agent Q-learning\nX Assume predefined UAV trajectories limits to adaptability
[88]MARLMARL with adversarial pre-training for dynamic and generalized jamming\n✓ Generalize to various jamming patterns via adversarial populations for pre-training\nX Pre-training for generalized jamming may require significant offline resources
[89]MATD3MATD3 algorithm with PER for dynamic resource management under jamming attacks\n✓ Handle high-dimensional continuous action spaces\nX The integration of PER and spectrum sensing may increase the computational complexity
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.362, + 0.492, + 0.544 + ], + "angle": 0, + "content": "problems that arise when optimizing UAV trajectories and various constraints under malicious jamming conditions [131]. The work in [80] investigates anti-jamming 3D trajectory design for UAV-enabled wireless sensor networks under a probabilistic LoS channel model. The probabilistic LoS model accounts for elevation angle-dependent shadowing effects in urban environments compared with simplified LoS models. The BCD and SCA algorithms are employed to optimize the UAV's horizontal and vertical trajectory, allowing the UAV to move closer to the ground station for improved transmission rates while dynamically adjusting its elevation angle relative to the jammer to mitigate interference." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.548, + 0.493, + 0.835 + ], + "angle": 0, + "content": "However, the anti-jamming trajectory optimization in [80] under the probabilistic LoS model does not consider the energy consumption issue. The study in [81] utilizes SCA and Dinkelbach's algorithm to adjust the UAV's trajectory to avoid areas with jammers while maximizing energy efficiency, which is defined as the ratio of total throughput to propulsion energy consumption during flight. Compared to hovering-centric benchmarks, the optimized trajectory reduced energy consumption by \\(82\\%\\) while maintaining \\(73.16\\%\\) of the sum throughput. It is worth noting that the transmit power of the UAV and station is fixed in [81], whereas power optimization is also an important factor for energy efficiency. The authors in [82] use the SCA and BCD algorithms to maximize throughput by iteratively optimizing power allocation (via convex reformulation of throughput bounds) and UAV trajectory (via slack variables for distance constraints and jamming mitigation) to avoid jamming signals. 
The proposed scheme achieves \\(40\\%\\) higher throughput compared to the \"Line trajectory with fixed power\" baseline." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.839, + 0.492, + 0.947 + ], + "angle": 0, + "content": "While convex optimization methods [80]–[82] work well for fixed jamming patterns, they may struggle to handle dynamic, intelligent jamming [174] in real-time due to their reliance on global information and the challenges inherent in solving nonconvex problems with increased optimized variables [142]. In contrast, RL and DRL offer significant advantages by enabling autonomous, adaptive decision-making [143], [147]. These" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.362, + 0.923, + 0.635 + ], + "angle": 0, + "content": "approaches can continuously adjust to environmental changes, learn from past interactions, and optimize performance in real-time [144], [175]. The RL-based anti-jamming methods have emerged as a promising solution due to their ability to operate without excessive prior information (such as unknown environment, CSI, and jamming mode) [147]. Single-agent RL algorithms have been used in previous works to develop anti-jamming strategies in communication networks by regarding jammers and other legitimate users as part of the environment, including independent anti-jamming channel selection methods [83]–[86]. However, these single-agent approaches may fail to converge when dealing with a large number of agents or a high-dimensional action-state space [87], making them impractical for complex, multi-agent scenarios in the LAENet. To address these limitations, multi-agent RL (MARL) methods have been proposed to allow each agent to make decisions based on local information and exchange data with others (such as observations or model parameters)." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.638, + 0.923, + 0.911 + ], + "angle": 0, + "content": "The study in [87] proposes a collaborative multiagent layered Q-learning (MALQL) algorithm for anti-jamming communication in UAV networks by jointly optimizing channel and power allocation to maximize system Quality of Experience (QoE). The problem is modeled as a local interaction Markov game based on the constructed interference graph. The MALQL divides the problem into two subgames of channel selection (Layer 1) and power allocation (Layer 2), as shown in part B of Fig. 10. The channel layer uses a graph-based interference model to capture mutual interference among UAVs. Each UAV is represented as a node, and edges are formed between UAVs that are within a predefined interference distance. This model allows UAVs to identify and avoid channels that are being used by neighboring UAVs or jammed by external attackers, thereby reducing the jamming likelihood. The power layer optimizes transmit power to meet rate thresholds. Theoretical analysis confirms that MALQL can converge to a pure strategy Nash equilibrium." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.922, + 0.947 + ], + "angle": 0, + "content": "Nevertheless, there are still some issues with the anti-jamming mechanism in [87]. Considering that the rapid mo" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.92, + 0.041 + ], + "angle": 0, + "content": "18" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.073, + 0.328, + 0.086 + ], + "angle": 0, + "content": "Part A. Overall system model under jamming" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.092, + 0.326, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.344, + 0.073, + 0.582, + 0.086 + ], + "angle": 0, + "content": "Part B. 
MALQL-based anti-jamming scheme" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.092, + 0.59, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.602, + 0.073, + 0.905, + 0.096 + ], + "angle": 0, + "content": "Part C. Population update of pre-training for generalized anti-jamming scheme" + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.1, + 0.905, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.227, + 0.353, + 0.241 + ], + "angle": 0, + "content": "Part D. PER-MATD3-based anti-jamming scheme" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.251, + 0.492, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.763, + 0.235, + 0.899, + 0.246 + ], + "angle": 0, + "content": "Actor-Critic network for agent k" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.251, + 0.9, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.395, + 0.922, + 0.466 + ], + "angle": 0, + "content": "Fig. 8. The overall architecture illustrates various reinforcement learning-based anti-jamming schemes designed to enhance communication reliability in UAV-assisted MEC systems under jamming attacks. Part A presents the overall system model, depicting UAVs and jammers interacting within a dynamic environment. Part B shows the MALQL-based anti-jamming scheme, where agents use layered Q-learning to determine actions based on local observations and rewards. Part C depicts the population update mechanism for pre-training a generalized anti-jamming scheme, involving a jammer population, trajectory encoder, and decoder network to optimize jamming policies. Part D introduces the PER-MATD3-based anti-jamming scheme, incorporating a priority experience replay buffer and actor-critic networks to dynamically allocate resources and optimize UAV deployment strategies." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.491, + 0.491, + 0.868 + ], + "angle": 0, + "content": "bility of UAVs may expose them to various and unknown jamming patterns due to frequent transitions to new scenarios, the anti-jamming methods need to be generalized [176], especially in the LAENet. The work [87] randomly initializes strategies and learns from scratch for a particular deployment environment with no pretraining, which may lead to a reduction in the generalization ability of the anti-jamming strategy. In light of this, the authors in [88] introduce an adversarial pre-training stage in the proposed two-stage MARL with a decentralized partially observable Markov decision process. Specifically, the adversarial pre-training stage uses a quality-diverse jammer population (e.g., fixed, random, sweeping, statistic, and RL-based jamming) to bootstrap generalized anti-jamming strategies instead of directly initializing the agents with random anti-jamming policies, as shwon in part C of Fig. 10. This pre-training ensures that UAVs are not overfitted to specific jamming patterns and can generalize to new jamming attacks in real-world deployments. The pre-trained policies are deployed in the fine-tuning stage, where a graph convolutional-based MARL algorithm is proposed to jointly optimize channel selection and power allocation for anti-jamming similar to [87]. Simulation results show that the proposed solution achieves \\(20 - 30\\%\\) higher cumulative rewards than collaborative multi-agent Q-learning [177] and independent Q-learning [83] under fixed and sweeping jamming." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.492, + 0.947 + ], + "angle": 0, + "content": "Note that previous RL-based anti-jamming strategies [87], [88] mainly rely on the Q-learning method, which is suitable for discrete action spaces but may be limited in dealing with high-dimensional continuous spaces [147], [148]. 
The authors in [89] propose a PER-MATD3 algorithm against jamming" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.491, + 0.923, + 0.763 + ], + "angle": 0, + "content": "by integrating spectrum-aware channel selection and prioritized experience replay (PER) into an MADRL framework, as shown in part D of Fig. 10. The proposed spectrum-aware intelligent channel selection uses energy detection-based spectrum sensing, enabling UAVs to identify and avoid jammed channels. The TD3 is specifically designed to handle continuous-valued states and actions, where two critic networks, target policy smoothing, and delayed policy updates are used to further stabilize DRL training. By leveraging PER, the agents can learn from high-error experiences, thereby accelerating adaptation to time-varying CSI, imperfect jamming detection, and co-channel interference. By jointly optimizing CPU frequency, bandwidth allocation, and channel selection to minimize the impact of jamming, PER-MATD3 reduces system cost (a linear combination of latency and energy consumption) by approximately \\(16.7\\%\\), \\(9.1\\%\\), and \\(1.2\\%\\) compared to the baselines of Q-learning, MATD3-JSC (without PER), and PER-MATD3 (without channel selection), respectively." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.764, + 0.923, + 0.946 + ], + "angle": 0, + "content": "Lesson Learned. Recent advancements in anti-jamming strategies show that intelligent decision-making for trajectory control, channel selection, and power control is essential for effective jamming mitigation. A key takeaway is the successful integration of MARL to develop dynamic and adaptive anti-jamming solutions [75]. By employing intelligent algorithms such as adversarial pre-training and decentralized decision-making, UAV networks can generalize anti-jamming strategies across diverse environments [76], [77]. 
However, challenges persist in the generalization of these strategies across various jamming types and environmental conditions, as well as balancing the trade-offs between energy consumption," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "19" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.073, + 0.289, + 0.085 + ], + "angle": 0, + "content": "Part A. ML-based spoofing detection" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.086, + 0.484, + 0.22 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.233, + 0.345, + 0.246 + ], + "angle": 0, + "content": "Part B. Rule and ML-based spoofing detection" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.251, + 0.489, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.43, + 0.493, + 0.546 + ], + "angle": 0, + "content": "Fig. 9. The overall framework of ML and rule-based spoofing detection for GPS spoofing detection in the LAENet. Part A depicts an ML-based spoofing detection mechanism in [93], where multiple CNN classifiers are trained with updated sample weights to form an integrated classification model. Each CNN transfers its optimized parameters to subsequent classifiers, enhancing the model's robustness. Part B presents a hybrid rule and ML-based approach in [94], where statistical analysis of path losses between UAVs and multiple base stations (BSs) is performed by edge servers. The analyzed data is processed through MLPs to generate individual predictions, which are aggregated to produce a final spoofing detection decision." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.572, + 0.491, + 0.663 + ], + "angle": 0, + "content": "latency, and throughput. 
Future research could delve into the integration of more adaptive learning frameworks (such as deep learning) into the LAENet for anti-jamming, enabling it to better manage partial or imperfect environmental observations for low-latency, real-time decision-making in multi-UAV systems." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.684, + 0.219, + 0.699 + ], + "angle": 0, + "content": "B. Spoofing Defense" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.704, + 0.493, + 0.947 + ], + "angle": 0, + "content": "In the LAENet, the openness of A2G transmission channels and the dynamic nature of low-altitude aircraft networks make them particularly susceptible to identity-based spoofing attacks [50]. In such attacks, a malicious entity impersonates a legitimate transmitter using falsified identity information, such as a spoofed media access control address, to gain unauthorized access to the network [52]. Once authenticated, the spoofer can disrupt communications among aircraft by launching more severe attacks, such as rogue access point infiltration and denial-of-service attacks, ultimately leading to network congestion and service outages [75]. Given the limitations of conventional authentication methods that rely on complex cryptographic protocols [52], PLA offers a promising alternative by leveraging the inherent and unique physical characteristics of wireless transmissions for the LAENet, which is introduced in Section III-B. Overall, this type of PLA can defend against spoofing" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.07, + 0.921, + 0.129 + ], + "angle": 0, + "content": "attacks in the LAENet by exploiting the unique characteristics of the wireless channel (such as RSS, Rayleigh channel, and Rician channel) to identify and separate between legitimate devices and spoofers." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.13, + 0.922, + 0.492 + ], + "angle": 0, + "content": "The work in [90] proposes a PLA framework to detect spoofing attacks by exploiting spatial correlations of RSS in A2G channels. The key idea is that the RSS from a legitimate transmitter will remain relatively consistent due to its fixed location, while the RSS from a spoofer will vary significantly because of its different position and channel conditions. Thus, the UAV receiver can perform a hypothesis test to authenticate incoming signals. if the RSS distance between the current signal and a previously authenticated signal is below a predefined threshold, the signal is accepted as legitimate. Otherwise, it is flagged as a spoofing attempt. However, the work [90] is considered under an ideal transmission scenario, where the propagation environment is perfectly exempted from external interference. To address this limitation, the authors in [91] develop a PLA framework that accounts for channel randomness and interference uncertainty. First, they model the G2A link as a Rayleigh fading channel. Then, they introduce jamming signals as external interference. By modeling the jamming power statistically and incorporating it into the analysis of detected power differences, if the difference in power exceeds a predefined threshold, it is identified as a spoofing attempt. Thus, even in real-world scenarios with interference, the proposed framework can better differentiate between natural channel fading and anomalies caused by spoofing attacks." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.492, + 0.922, + 0.809 + ], + "angle": 0, + "content": "In addition to using the statistical properties of the Rayleigh channel to design PLA against spoofing in environments with multipath fading (such as urban areas), the channel characteristics in suburban environments should also be considered. 
To address this, the work [77] proposes a PLA framework to counter spoofing attacks in both urban (Rayleigh channel) and suburban (Rician channel) environments. As mentioned earlier (in Section III-B), a new metric AD is devised to distinguish between legitimate signals and spoofing signals based on differences in channel randomness and geographical factors, such as elevation angles and distances. Adopting the unique fading characteristics of Rayleigh and Rician channels makes it statistically difficult for a spoofer to accurately mimic a legitimate signal. By considering elevation angles and distances in channel modeling, it ensures that a spoofer cannot easily replicate a legitimate signal even if the spoofer knows the legitimate transmitter's location. Simulation results show that the probability of a successful spoofing attack is significantly reduced compared to the baseline [171], where the spoofing miss detection probability drops to 0.014 in suburban environments and 0.371 in dense urban areas." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.81, + 0.922, + 0.945 + ], + "angle": 0, + "content": "In the LAENet, in addition to being vulnerable to identity-based spoofing attacks, aircrafts are also susceptible to signal spoofing attacks from the Global Navigation Satellite System (GNSS), particularly GPS spoofing, which poses a significant security threat by generating and transmitting counterfeit satellite signals resulting in severe positioning deviations [25]. 
By interfering with or suppressing legitimate GNSS signals, attackers can manipulate UAV locations in an imperceptible manner to mislead UAVs, causing deviations from intended" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "20" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.071, + 0.913, + 0.106 + ], + "angle": 0, + "content": "TABLE IX SUMMARY OF SPOOFING DEFENSE FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.117, + 0.913, + 0.348 + ], + "angle": 0, + "content": "
TechniquesReferenceAlgorithmPros & Cons
PLA[90]RSSSpatial correlations of RSS distances in PLA against spoofing attacksUse RSS-based channel characteristics to reduce PLA computational complexityAssume an ideal transmission scenario without external interference
[91]Rayleigh channelDefend against spoofing attacks by considering channel randomness and jammingSimultaneously address spoofing and jamming attacks via PLAAssume static UAVs and a known jamming distribution
[77]Rayleigh and Rician channelsAD-based PLA for spoofing defense under Rayleigh and Rician channelsProvide a thorough analysis of spoofer identification in urban and suburban environmentsAssume perfect CSI in channel modeling
GNSS spoofing detection[92]Rule-based detectionCombine cooperative localization mechanism with Stackelberg game against spoofing attacksSpoofing detection is based on neighboring UAV signal sources without predefined thresholdsExtending to larger UAV groups may require complex adjustments
[93]ML-based detectionImproved AdaBoost-CNN for multi-modal spoofing attack identificationHigh accuracy in identifying spoofing attacks with limited data samplesDependence on predefined signal features may lead to model overfitting
[94]Rule & ML-based detectionMLP and statistical feature extraction on path-loss data for detecting GPS spoofingNo additional hardware/energy burden on UAVsRobust performance under sparse base station coverageSpoofing detection performance degrades in areas with unstable cellular signals
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.374, + 0.492, + 0.587 + ], + "angle": 0, + "content": "flight paths, violations of no-fly zone regulations, or increased collision risks [46]. Given a critical role of GNSS in UAV operations, effective detection and mitigation strategies for spoofing attacks are essential to ensure flight safety and prevent security breaches in the LAENet. Currently, studies on signal spoofing attack recognition in the LAENet mostly focuses on recognizing GNSS spoofing attack detection, which primarily falls into two categories with respect on rule-based and ML-based methods [19], [25]. Rule-based detection methods typically assess the relative distance and positional deviations of UAVs to determine if they are under GNSS spoofing attack. On the other hand, the ML-based methods pay attention to recognize different spoofing types by learning the characteristics of received signals." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.598, + 0.493, + 0.947 + ], + "angle": 0, + "content": "Generally, the simplified rule-based methods determine whether a UAV has encountered spoofing attacks based on whether its trajectory follows a predictable path [178], [179], since a UAV may exhibit deviations from this path due to the false signals imposed by the spoofer. If the measured deviations exceed predefined thresholds, it indicates a potential spoofing attack. However, relying on predefined thresholds for detecting deviations may not dynamically adapt to the spoofing attacks. The study in [92] proposes a defense mechanism based on cooperative localization, where each UAV uses the relative distances and real locations of neighboring UAVs to detect spoofing attacks. Specifically, each UAV measures its relative distances based on alternative signal sources of neighboring UAVs and compares these results with its own GPS-derived location. 
If inconsistencies are detected (e.g., the GPS-derived location does not match the majority of the calculated locations), the UAV identifies itself or a neighboring UAV as being under attack. To optimize defense deployment, an equilibrium of a dynamic Stackelberg game is derived between the drone operator (leader) and the spoofer (follower). Simulation results show that the proposed scheme can effectively prevent spoofer's capture, while random/deterministic baselines suffer from attackers capturing one to two UAVs." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.374, + 0.923, + 0.766 + ], + "angle": 0, + "content": "Recent ML-based methods for spoofing defense primarily focus on binary classification concerning normal GPS signals and spoofing signals [180], [181]. However, they fail to recognize specific types of spoofing attack necessary for countermeasures in complex environments. Hence, there is an urgent need to recognize diverse GPS spoofing attack patterns for effective countermeasures for the LAENet. The authors in [93] propose an improved AdaBoost-CNN algorithm to address the challenge of recognizing diverse GPS spoofing attack patterns for UAVs, as shown in part A of Fig. 9. Three categorized spoofing attack patterns are considered including static and dynamic spoofing based on the UAV's motion state, power-matched and overpowered spoofing based on signal power, and position and time spoofing based on the spoofing targets. The authors select key GPS spoofing signal features such as signal quality monitoring, carrier-to-noise ratio, Doppler shift, and clock error to train the classification model. The improved AdaBoost-CNN algorithm integrates multiple weak CNN classifiers into a strong classification model. Each CNN base classifier uses the updated network parameters from the previous CNN as initial values, enabling iterative refinement of network weights to enhance feature extraction and generalization. 
With 800 simulated samples, the improved AdaBoost-CNN achieves \\(100\\%\\) accuracy, outperforming original AdaBoost-CNN \\((94.38\\%)\\), CNN \\((74.38\\%)\\), DNN \\((60.94\\%)\\), SVM \\((40.63\\%)\\), and KNN \\((53.13\\%)\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.779, + 0.923, + 0.947 + ], + "angle": 0, + "content": "Furthermore, integrating rule-based approaches with machine learning-based methods provides an effective and robust defense against spoofing attacks. The work in [94] leverages statistical features of path losses between UAVs and terrestrial base stations to detect a UAV's trajectory deviation due to GPS spoofing, as shown in part B of Fig. 9. The spoofing detection is formulated as a nonlinear optimization problem that aims to minimize hypothesis test errors by adjusting thresholds, statistical feature weights, and the number of base stations. To further accurately analyze path loss's statistical features for final decisions on predicting GPS spoofing probabilities," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.905, + 0.031, + 0.92, + 0.041 + ], + "angle": 0, + "content": "21" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.915, + 0.107 + ], + "angle": 0, + "content": "TABLE X SUMMARY OF ANOMALY DETECTION FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.117, + 0.898, + 0.348 + ], + "angle": 0, + "content": "
Anomaly typeReferenceAlgorithmPros & Cons
Jamming[98]HDBNSA module based on HDBN for detecting jamming anomalies\n✓ UAccurately characterize and detect jamming anomalies via KLD/DB metrics\n✗ Unstable initialization in unsupervised learning affects the performance of the HDBN
[99]GDBNGDBN to model the radio environment and detect and classify jamming anomalies\n✓ Unsupervised learning eliminates dependency on labeled data in classification of anomalies\n✗ Computational complexity increases with the number of jamming categories
[100]Active-GDBNActive-GDBN used to model UAV-jammer interactions for anomaly detection\n✓ Actively incorporate UAV's actions for faster adaptation and jamming detection\n✗ M-MJPF requires significant computational resources
[101]Blind channel estimation & ACSBlind channel estimation based on ACS properties to detect jammer signals\n✓ Does not rely on prior knowledge of the jammer's behavior\n✗ Assumes a specific structure of the multicarrier modulation format
Abnormal Power[102]Spectrum surveillanceLocal and cooperative detection of abnormal power emission\n✓ Handle both aggressive and passive power misuse\n✓ Cloud-based framework enables real-time closed-loop surveillance\n✗ Computational complexity increases with the number of SN
Eavesdropping[103]SVM & K-meansOne-class SVM and K-means clustering for detecting eavesdropping anomalies\n✓ One-class SVM and K-means are stable under varying eavesdropper power\n✗ Detection performance mainly depends on the quality and quantity of the ATD
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.374, + 0.492, + 0.496 + ], + "angle": 0, + "content": "multilayer perceptron (MLP) neural networks are deployed on edge cloud servers, where individual MLP models at each BS are used to analyze statistical features of path losses. Simulation results show that the proposed method achieves \\(97\\%\\) accuracy with two base stations and \\(83\\%\\) accuracy with a single base station, outperforming baseline approaches such as adaptive trustable residence area (ATRA), which necessitates three base stations for triangulation [182]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.495, + 0.493, + 0.768 + ], + "angle": 0, + "content": "Lesson Learned. For identity spoofing in the LAENet, leveraging signal features such as received signal strength and channel randomness in PLA design is an effective approach [77], [90], [91]. On the other hand, employing rule-based or ML-based techniques can detect and mitigate GNSS signal spoofing [92]–[94]. While ML-based methods show promising performance, they are limited by factors such as computational complexity and dependency on large datasets. Rule-based methods are simpler but may struggle in dynamic or uncertain environments. Future research could explore the application of RL to develop adaptive and robust spoofing defense mechanisms in the LAENet, which has not yet been extensively studied. Different from the abovementioned approaches, RL dynamically learns from interactions with the environment, and its sequential decision-making ability enables UAVs and ground stations to optimize spoofing defense strategies based on continuous feedback [147], make it a promising direction for enhancing spoofing defense in the LAENet" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.785, + 0.452, + 0.8 + ], + "angle": 0, + "content": "V. 
COMMUNICATION INTEGRITY FOR LAENET" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.805, + 0.233, + 0.82 + ], + "angle": 0, + "content": "A. Anomaly Detection" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.824, + 0.492, + 0.947 + ], + "angle": 0, + "content": "Due to the open nature of wireless channels and the dominant LoS links in the LAENet, communication becomes particularly vulnerable to a diverse range of anomalous behaviors such as abnormal jamming, abnormal transmission power, and covert eavesdropping [46], [49]. Specifically, malicious jammers sense spectrum activity and dynamically adapt their interference patterns to mislead the UAV into taking suboptimal or harmful actions [81], [95]. In parallel, abnormal" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.374, + 0.923, + 0.571 + ], + "angle": 0, + "content": "power emissions, either due to device faults, selfish behavior, or malicious intent, can violate spectrum policies, introduce harmful interference, and disrupt cooperative spectrum sharing [96]. Additionally, the pervasive risk of eavesdropping is that adversaries exploit the UAV's uplink or downlink transmissions to intercept sensitive data [61], [67]. Thus, it is essential to detect and mitigate these abnormal activities in the LAENet. Different from previously reviewed approaches such as anti-eavesdropping (Section III-A) and anti-jamming (Section IV-A), anomaly detection is a method used to identify and mitigate unexpected deviations from or irregularities in normal operational patterns by monitoring communication channels in the LAENet [127], [183]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.583, + 0.923, + 0.947 + ], + "angle": 0, + "content": "Jamming anomalies generally aim to disrupt the normal operation of UAV communication links, such as by injecting disruptive signals to interfere with the legitimate communication process. 
The study in [98] proposes a novel Self-Awareness (SA) module to leverage the radio to detect abnormal behaviors caused by jamming attacks for Cognitive UAV communications. The SA module unsupervisedly learns a generative model using a Hierarchical Dynamic Bayesian Network (HDBN) [184] to represent the joint distribution of random variables characterizing the radio environment at different levels of abstraction and across time, where the Modified Bayesian Filtering [185] is used to integrate multilevel abnormality measurements for online predictions of radio environmental states at different levels. Since jamming can disrupt and shift the distributions of the radio environment, the abnormalities can be detected by calculating the Kullback-Leibler Divergence (KLD) and Dhattacharyya distance (DB) [186] between predictive messages and diagnostic messages. The predictive messages are generated by the HDBN to capture the expected patterns of normal signals, and diagnostic messages reflect the actual state of the signal. The jammer's impact is characterized by calculating generalized errors based on shifts in amplitude, phase, and frequency of signals, allowing the radio to predict future activities of the jammer. The SA" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "22" + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.073, + 0.242, + 0.083 + ], + "angle": 0, + "content": "Part A. HDBN-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.092, + 0.507, + 0.212 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.222, + 0.243, + 0.232 + ], + "angle": 0, + "content": "Part B. GDBN-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.236, + 0.506, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.074, + 0.718, + 0.084 + ], + "angle": 0, + "content": "Part C. 
Active-GDBN-based Scheme" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.094, + 0.907, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.318, + 0.922, + 0.387 + ], + "angle": 0, + "content": "Fig. 10. The overall architecture illustrates jamming anomaly detection to enhance communication integrity in the LAEnet. Part A presents an HDBN-based scheme focusing on hierarchical dynamic models to predict and detect abnormal signals caused by jammers. It details the transition probabilities between model states and the prediction of continuous states based on discrete superstates. Part B introduces a GDBN-based scheme, extending the HDBN approach by incorporating generalized states and observations, allowing for more nuanced predictions and error calculations. Part C depicts an Active-GDBN-based scheme, integrating UAV actions into the model to actively infer and adapt to the environment, thereby optimizing resource allocation and anti-jamming measures." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.412, + 0.492, + 0.456 + ], + "angle": 0, + "content": "module achieves a near \\(100\\%\\) abnormality detection accuracy, approximately \\(12\\%\\) higher than the traditional energy detector-based scheme." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.459, + 0.493, + 0.776 + ], + "angle": 0, + "content": "Different from the previous work [98], which introduced the SA module using HDBN for anomaly detection, the authors in [99] propose a Generalized Dynamic Bayesian Network (GDBN)-based framework to enhance the SA module by further classifying the detected anomalies caused by multiple jammers. A generalized state-space model [184] is used to represent the evolving radio environment as a GDBN model learned in an unsupervised manner. 
Different from the KLD/DB metric in [1], Kullback-Leibler divergence and Bhattacharyya distance are used as abnormality measurements between predicted and observed signals to detect jamming. Once an abnormality indicative of jamming is detected, the UAV extracts the interfering signal and compares it with prelearned GDBN models (each representing a different jamming modulation scheme). By evaluating which pre-learned model best explains the extracted jamming signal, the UAV can not only detect the presence of a jammer but also classify its modulation type. Simulation results show that the GDBN-based method achieves an overall classification accuracy of \\(98\\%\\) at \\(\\mathrm{SNR} = 10\\) dB, outperforming LSTM \\((88\\%)\\), CNN \\((67\\%)\\), and SAE \\((90\\%)\\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.779, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Based on the study [99], the authors in [100] propose an Active-GDBN to model the dynamic interaction between the UAV and jammer for anomaly detection. Similar to [99], the generalized state-space model [184] is used to capture the features and dynamic evolution of UAV signals to represent the radio environment. Differently from passive detection and classification of jamming signals in [99], the Active-GDBN achieves active anomaly detection by incorporating the UAV's actions into the inference process. Specifically, the UAV employs a Modified Markov Jump Particle Filter (M-MJPF) [187] to link the UAV's actions to environmental" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.411, + 0.921, + 0.503 + ], + "angle": 0, + "content": "states and observations. Meanwhile, the UAV dynamically adjusts physical resource block selections to evade jamming by encoding jammer behavior and updating beliefs. The Active-GDBN achieves about \\(25\\%\\) to \\(37.5\\%\\) faster convergence on anomaly detection probability than the Q-learning-based baseline under various jamming types." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.505, + 0.922, + 0.852 + ], + "angle": 0, + "content": "Different from previous works [98]–[100] that detect jamming anomalies based on the statistical distribution divergence of the signal, study [101] focuses on detecting anomalies by exploiting the time delays, shifts, and modulation of the signal characteristics. Firstly, achieving blind channel estimation involves constructing cyclic correlation matrices to identify distinct Doppler shifts and time delays associated with transmissions by exploiting the inherent almost-cyclostationary (ACS) properties of UAV and jammer signals (e.g., periodic statistics from OFDM modulation). Then, this blind estimation process is combined with a widely linear minimum mean square error (WL-MMSE) filter to provide an initial estimate of the symbol vector by leveraging the non-circular statistics of the received signal, where the initial estimate includes contributions from both the UAV and the jammer. Finally, a post-sorting algorithm (PSA) is employed to iteratively decode and cancel the jammer's contribution by ranking and removing symbols with the highest signal-to-disturbance-plus-noise ratio (SDNR). Simulation results demonstrate that the proposed scheme can effectively detect and separate the jamming signals from UAV signals without requiring prior knowledge of the jammer's characteristics, even when the jammer's power is twice as strong as the UAV's power." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.922, + 0.945 + ], + "angle": 0, + "content": "In addition to jamming anomalies, which cause interference and security threats in the LAENet, abnormal power emissions in UAV communication networks also represent a critical type of anomaly, potentially leading to severe disruption of communication quality and violation of spectrum policies. 
The work in [102] proposes a cloud-based surveillance framework" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "23" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.071, + 0.915, + 0.107 + ], + "angle": 0, + "content": "TABLE XI SUMMARY OF INJECTION DEFENSE FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.117, + 0.903, + 0.303 + ], + "angle": 0, + "content": "
Injection typeReferenceAlgorithmPros & Cons
Jamming signal[98]HDBNHDBN-based jamming signal extraction and suppression\n✓ Autonomous learning from raw I/Q data enables adaptability to dynamic jamming patterns\nX Assume the jammer's output power remains constant during attacks
[101]SICSIC with blind channel estimation for detecting and eliminating jamming signals\n✓ Eliminate jamming signals regardless of the mobility patterns of jammers\nX Rely on sufficient cyclostationary features in the received signal
[104]DBFDBF algorithm for nullifying jamming signals\n✓ Effective suppression of jamming signals while maintaining carrier phase integrity\nX May be limited to specific GNSS frequency bands
Spoofing signal[105], [106]API & LSRSIC combined with API and LSR to recover legitimate signals from spoofing attacks\n✓ SemperFi with a single antenna does not require additional hardware\nX Limited to attackers with a power advantage lower than 15 dB
[107]Subspace projectionSubspace projection for nullifying spoofing signals\n✓ Low parameter dependency, requiring only code delays and carrier frequencies\nX Suppression performance declines if spoofing and legitimate signals have similar code delays
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.329, + 0.493, + 0.571 + ], + "angle": 0, + "content": "to address the detection of abnormal power emissions, where the cloud server assigns spectrum resources to the UAVs and shares UAVs' spectrum usage information with the surveillance center. The surveillance center assigns the detection task to \\( K \\) surveillance nodes (SNs) for local detection of abnormal power emission, where the detection rule is based on the Lagrange multiplier method and the generalized likelihood ratio test. After local decisions, \\( K \\) SNs report results to the surveillance center, where cooperative detection of abnormal power emission is performed using the decision rule that declares an abnormal event when at least \\( L \\) out of \\( K \\) nodes detect an abnormality, where the optimal global threshold of \\( L \\) is determined by solving the constraints on the global false-alarm probabilities. Simulation results show that the global detection probability exceeds 90% when transmit power deviation exceeds 0.02W (allocated power is 0.01W)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.576, + 0.493, + 0.879 + ], + "angle": 0, + "content": "Besides the threats of jamming and abnormal power emission, another critical anomaly that requires detection is eavesdropping in the LAENet, where malicious devices covertly intercept sensitive information during UAV-to-ground and UAV-to-UAV transmissions [66], [67]. Note that most previous works on anti-eavesdropping focused on measuring secure performance through secrecy rate and/or secrecy outage probability (such as [71], [76]) rather than emphasizing the detection of eavesdropping attacks. The work in [103] explores anomaly detection for eavesdropping attacks in UAV-aided wireless systems using unsupervised learning. 
Two datasets are prepared: artificial training data (ATD), simulated without eavesdropping based on CSI (all labeled normal), and a practical dataset extracted from received signal features (mean and variance of amplitude). Two types of unsupervised learning methods are designed for anomaly detection. One-class SVM maps data to a high-dimensional space, defining a normal region where outliers are detected. K-means clustering classifies test data into two clusters, labeling the one nearest to the ATD center as normal." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.884, + 0.492, + 0.947 + ], + "angle": 0, + "content": "Lesson Learned For jamming anomalies, the statistical distribution divergence detection and signal structural feature-based detection, such as HDBN, GDBN, and ACS, are used to model the dynamic environment and detect deviations from" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.329, + 0.923, + 0.676 + ], + "angle": 0, + "content": "learned normal patterns. For abnormal transmission power detection, a cloud-based surveillance framework supports a statistical distribution detection approach to monitor and identify power emission outliers. Leveraging its high computing power, the cloud enables cooperative analysis through multi-source data aggregation, dynamically optimizes detection thresholds using global information, and maintains a feedback loop for adaptive anomaly detection. For eavesdropping detection, unsupervised learning techniques, including One-Class SVM and K-means clustering, achieve the identification of anomalies in received signals. These approaches effectively achieve anomaly detection and demonstrate excellent performance. However, challenges remain, including the reliance on high-quality training data and the complexity of maintaining real-time adaptability in dynamic spectrum environments. 
Currently, Generative AI such as GANs and generative diffusion models presents a promising research direction for anomaly detection, as demonstrated in the use of generalized models in HDBN and the artificial data generation for training ML and clustering models in [188], [189]. Generative AI could further enrich training datasets and provide a high-level generative model to enhance anomaly detection in the dynamic and uncertain LAENet." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.71, + 0.651, + 0.725 + ], + "angle": 0, + "content": "B. Injection Defense" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.733, + 0.923, + 0.945 + ], + "angle": 0, + "content": "The low-altitude economy is highly dependent on open communication and network architecture with dense communication links, which brings injection attacks as a significant threat to UAV communication integrity [28], [46]. These attacks involve the deliberate injection of malicious signals, such as jamming and spoofing signals, to disrupt or manipulate legitimate communications [97], [190]. Jamming signal injection can make legitimate signals unrecognizable by emitting high-power electromagnetic interference to degrade signal reception [98]. Additionally, spoofing signal injection can transmit high-power signals to overshadow legitimate GNSS signals. Therefore, eliminating injection signals or separating them from legitimate signals is crucial for ensuring communication integrity in the LAENet." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "24" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.073, + 0.386, + 0.084 + ], + "angle": 0, + "content": "Part A. 
SNDR-based SIC for jamming injection defense" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.465, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.074, + 0.814, + 0.085 + ], + "angle": 0, + "content": "Part B. SIC with API and LSR for spoofing injection defense" + }, + { + "type": "image", + "bbox": [ + 0.495, + 0.098, + 0.913, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.493, + 0.253, + 0.81, + 0.266 + ], + "angle": 0, + "content": "Part C. Subspace projection for spoofing injection defense" + }, + { + "type": "image", + "bbox": [ + 0.496, + 0.277, + 0.905, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.363, + 0.922, + 0.412 + ], + "angle": 0, + "content": "Fig. 11. The overall architecture of injection defense mechanisms for UAVs in smart city applications. Part A presents the SIC architecture that processes channel state information to defend against jamming injection attacks [101]. Part B shows an SIC architecture integrated with API and LSR modules, which subtracts injection signals from the received signal to recover normal signals [105], [106]. Part C depicts a subspace projection-based architecture for spoofing injection defense, where the received signal is projected onto the orthogonal null space of the spoofing signals to eliminate them [107]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.436, + 0.492, + 0.786 + ], + "angle": 0, + "content": "The UAV's communication can be severely disrupted by jammers that exploit LoS propagation to inject jamming signals into the transmission channel, which may effectively mask legitimate signals and render them unrecognizable [101]. The work in [98] proposes an HDBN-based injection defense scheme to extract and remove the jammer's signal. 
This work first utilizes the HDBN to detect abnormal behaviors caused by jamming attacks, as mentioned earlier in Section V-A. Once the jammer's presence is confirmed, its signal characteristics are analyzed across multiple levels of continuous in-phase (I) and quadrature (Q) components and observation-level state vectors [191]. The extracted jammer signal is then separated from the received observation using frequency-domain subtraction [192], component-wise I/Q processing, and adaptive filtering [191]. The corrected signal is subsequently demodulated and decoded using techniques and error correction coding to restore the original signal. To maintain resilience against evolving jamming tactics, the system continuously updates the HDBN model to improve suppression commands. Simulation results show that the mean square error (MSE) of suppression commands decreases as the JSR increases, meaning that jammers attacking with higher power can be better estimated than jammers attacking with lower power." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.794, + 0.491, + 0.947 + ], + "angle": 0, + "content": "Different from the work in [98], which separates the jamming signal by analyzing its I/Q characteristics, the study in [101] proposes a Serial Interference Cancellation (SIC) scheme based on SDNR to eliminate injected anomalous signals in UAV communications, as shown in part A of Fig. 11. First, blind channel estimation and a WL-MMSE filter are used to identify UAV and jammer signals (as detailed in Section V-A). Then, the PSA ranks detected symbols based on SDNR, where the jamming signals rank higher in SDNR due to their higher emitted power. The SIC [193]," + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.436, + 0.923, + 0.603 + ], + "angle": 0, + "content": "[194] is subsequently designed for progressively eliminating jamming signals. 
Specifically, the high-rank jamming symbol is decoded, reconstructed using estimated channel parameters, and subtracted from the received signal. The process continues iteratively to eliminate previously detected jamming signals until all UAV symbols are successfully recovered, with the receiver dynamically updating channel estimation to adapt to jammer mobility and environmental changes. Simulation results demonstrate that the UAV signal can be recovered with low bit error rates \\((< 10^{-4})\\) even when the power of the jammer is double that of the UAV." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.607, + 0.923, + 0.925 + ], + "angle": 0, + "content": "Jamming attacks not only affect U2G and UAV-to-UAV communications but also cause RF interference, leading to UAVs failing to track GNSS signals in the LAENet. In light of this, the work in [104] proposes a self-calibrating digital beamforming (DBF) algorithm to effectively nullify jamming signals while preserving high-precision carrier phase measurements. It calibrates the antenna array's steering vectors and RF channel characteristics. Once calibration is complete, the system performs jamming detection and direction estimation by analyzing interference patterns across the antenna array. Then, the minimum power distortionless response (MPDR) optimization rule is used to calculate optimal beamforming weights, which aim to create nulls in the beam pattern corresponding to the directions of jamming signals, effectively suppressing them. The calculated beamforming weights are applied to the received signals to produce the beamformer output, which effectively suppresses jamming signals while preserving the carrier phase integrity of the desired signals. The proposed scheme achieves up to 80 dB Jammer-to-Signal Ratio (JSR) suppression, significantly outperforming the conventional Power Inversion (PI) scheme." 
+ }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "In addition to jamming signals, spoofing attacks can easily" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.032, + 0.92, + 0.04 + ], + "angle": 0, + "content": "25" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.07, + 0.49, + 0.431 + ], + "angle": 0, + "content": "transmit fake signals to manipulate GNSS signals due to their open signal structure and weak signal strength [195]. One type of method is based on signal encryption or data encryption to prevent malicious spoofers from injecting illegitimate signals [196]–[198]. However, they may not be suitable for resource-constrained UAVs in the LAENet. Therefore, defending against spoofing signal injection based on different signal characteristics is a promising solution. The authors in [105], [106] propose an anti-spoofing system, called SemperFi, to autonomously recover legitimate signals during active spoofing for UAVs. The system employs two core modules: the Adversarial Peak Identifier (API) and the Legitimate Signal Retriever (LSR), as shown in part B of Fig. 11. The API detects spoofed signals by correlating inertial measurement unit (IMU) data with calculated position-velocity-time (PVT) solutions [199]. The LSR module replicates the spoofing signal once it is identified. Then, similar to the study in [101], the SIC is applied to subtract the replica from the composite received signal that contains legitimate and spoofing signals. SemperFi enters an iterative refinement process if spoofing signals persist after initial cancellation, where replication, subtraction, and reassessment are performed until the spoofing detector no longer triggers an alarm, indicating sufficient attenuation or elimination of spoofing." 
+ }, + { + "type": "text", + "bbox": [ + 0.079, + 0.433, + 0.49, + 0.747 + ], + "angle": 0, + "content": "Besides recovering legitimate signals by subtracting spoofing signals from the received signal [101], [105], [106], projecting the signal is also a viable injection defense strategy. In the study [107], the GNSS receiver's spoofing mitigation algorithm employs a subspace projection-based interference cancellation method to effectively eliminate spoofing signals, as shown in part C of Fig. 11. Specifically, the receiver on UAVs acquires and tracks incoming signals, identifying potential spoofing signals and reconstructing them based on their power levels, pseudo-random noise (PRN) code delays, and carrier frequencies. Then, the receiver uses these reconstructed spoofing signals to construct a spoofing subspace, which represents all possible linear combinations of spoofing signal characteristics. To effectively remove spoofing signals from the received signal, the receiver performs orthogonal projection to obtain a cleaned signal by mapping the received signal onto a complementary null space that is mathematically orthogonal to the spoofing subspace. Simulation results show that shorter projection lengths suppress spoofing signals more effectively than longer projections, achieving a \\(20\\mathrm{dB}\\) gain in Signal-to-Interference Ratio (SIR)." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.749, + 0.49, + 0.944 + ], + "angle": 0, + "content": "Lesson Learned The above-mentioned studies have demonstrated the effectiveness for mitigating injection signals, such as jamming and spoofing attacks, thereby enhancing UAV communication reliability and security. These advancements leverage techniques that not only detect malicious signal interference but also enable autonomous recovery. 
One key advantage is that non-cooperative detection techniques, such as blind estimation [101] and self-awareness models [98], allow for efficient attack identification without requiring prior knowledge of the attacker's signal characteristics to adapt to dynamic and adversarial environments. However, several challenges remain in that beamforming-based or spatial filtering techniques rely on multi-antenna configurations [101]," + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.07, + 0.92, + 0.174 + ], + "angle": 0, + "content": "[104], limiting their applicability in cost-sensitive or small UAV systems. Future work should explore lightweight and energy-efficient implementations of injection defense to support stable UAV signal integrity protection. Additionally, more intelligent injection defense strategies combining optimization methods, RL, and ML could enhance resilience against more sophisticated adversaries." + }, + { + "type": "title", + "bbox": [ + 0.584, + 0.194, + 0.844, + 0.206 + ], + "angle": 0, + "content": "VI. FUTURE RESEARCH DIRECTIONS" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.214, + 0.8, + 0.227 + ], + "angle": 0, + "content": "A. Energy-efficient Physical Layer Security" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.233, + 0.92, + 0.44 + ], + "angle": 0, + "content": "Future work can focus on exploring more unique physical characteristics of wireless communication, such as exploiting channel characteristics and implementing simple coding schemes, to develop secure and low-energy protocols. Meanwhile, drones in the LAENet need to develop adaptive power control strategies that dynamically adjust transmission power based on channel conditions and security requirements to minimize unnecessary energy consumption [200]. Moreover, dynamic trajectory optimization is equally important for energy efficiency [201]. 
Future research can explore enabling UAVs to learn attack patterns in real time, share secure trajectory models across swarms, and dynamically adjust flight paths based on real-time security and power consumption feedback." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.464, + 0.891, + 0.478 + ], + "angle": 0, + "content": "B. Multi-drone Collaboration for Secure Communication" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.484, + 0.92, + 0.662 + ], + "angle": 0, + "content": "Future research on secure physical layer communication in the LAENet should move beyond existing dual-UAV collaboration models and explore distributed multi-UAV coordination (or UAV swarms) for enhanced resilience against jamming, spoofing, and unauthorized access [202]. For example, UAV swarms can collaboratively emit interference signals to obscure unauthorized receivers, thereby enhancing the confidentiality of communications [20]. Additionally, the integration of adaptive trust-based mutual authentication protocols among UAVs is essential [26]. Multiple UAVs with mutually verified identities can enable dynamic and secure spectrum-sharing mechanisms to optimize resource utilization in the LAENet." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.685, + 0.77, + 0.699 + ], + "angle": 0, + "content": "C. AI-driven Security Defense Strategy" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.705, + 0.92, + 0.944 + ], + "angle": 0, + "content": "Existing AI-based security strategies mainly focus on training AI models to identify anomalous signals while having some limitations. The resource-constrained drones are unable to train high-quality AI models, making the integration of edge computing a promising approach for model training [200]. Note that AI models may be difficult to generalize in recognizing various anomalous signals because they are pre-trained on previously collected datasets of fixed size. 
Future work can explore leveraging GAN or diffusion models to generate datasets based on real-time captured anomalous signals [203]. Furthermore, emerging generative AI technologies, such as the diffusion model for secure network topology generation in low-altitude domains [189], [204], AI agents for human-aerial vehicle secure interaction [205], and mixture of experts for robust wireless communications [2], [206], can be explored to achieve a more autonomous and intelligent LAENet." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "26" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.07, + 0.444, + 0.085 + ], + "angle": 0, + "content": "D. Space-Air-Ground Integrated Security Architecture" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.089, + 0.493, + 0.256 + ], + "angle": 0, + "content": "Future research can explore establishing a multi-domain physical layer security framework for LAENet to connect space, air, and ground layers, providing seamless communication coverage and cost-effective network access [55], [207]. A potential key research direction is the development of a coordinated multi-tier security mechanism, where satellites, UAVs, and terrestrial base stations collaboratively enhance physical layer security through dynamic resource allocation and interference management based on real-time CSI and environmental conditions, such as UAV mobility, channel fading, and spectrum constraints." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.276, + 0.379, + 0.29 + ], + "angle": 0, + "content": "E. 6G-Enabled Secure UAV Communication" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.296, + 0.493, + 0.493 + ], + "angle": 0, + "content": "The advent of 6G networks presents new opportunities for LAENet. Terahertz (THz) communication can offer ultrahigh-speed data transmission capabilities for LAENet [208]. 
Future research can explore the integration of THz with advanced beamforming techniques to focus signals on legitimate users, thereby enhancing security and reducing the risk of interception. Furthermore, Reconfigurable Intelligent Surfaces (RIS) play a crucial role in strengthening physical layer security by intelligently controlling wireless signal propagation [209], [210]. Future work can investigate RIS-based secure beamforming strategies to mitigate adversary interception, and leverage optimization techniques and DRL to adaptively adjust beamforming against eavesdropping or jamming attacks." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.51, + 0.352, + 0.524 + ], + "angle": 0, + "content": "VII. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.53, + 0.495, + 0.756 + ], + "angle": 0, + "content": "This paper has presented a comprehensive survey on secure physical layer communications in the LAENet, emphasizing the importance of safeguarding confidentiality, availability, and integrity in communications. It introduced the concept and architecture of the LAENet and outlined the associated security issues in physical layer communication. Then, the survey provided in-depth reviews of countermeasures for anti-eavesdropping strategies, authentication schemes, anti-jamming strategies, spoofing defenses, anomaly detection, and injection defense. Finally, the paper proposed a set of forward-looking future research directions. These discussions highlighted the critical role of secure physical layer communication in supporting the development of the LAENet and offered valuable insights for ongoing advancements in this emerging domain." + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.775, + 0.333, + 0.789 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.795, + 0.49, + 0.841 + ], + "angle": 0, + "content": "[1] Z. Li, Z. Gao, K. Wang, Y. Mei, C. Zhu, L. Chen, X. Wu, and D. 
Niyato, \"Unauthorized uav countermeasure for low-altitude economy: Joint communications and jamming based on mimo cellular systems,\" IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6659-6672, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.842, + 0.49, + 0.886 + ], + "angle": 0, + "content": "[2] C. Zhao, J. Wang, R. Zhang, D. Niyato, G. Sun, H. Du, D. I. Kim, and A. Jamalipour, \"Generative ai-enabled wireless communications for robust low-altitude economy networking,\" arXiv preprint arXiv:2502.18118, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.887, + 0.492, + 0.945 + ], + "angle": 0, + "content": "[3] H. A. H. Alobaidy, R. Nordin, M. J. Singh, N. F. Abdullah, A. Haniz, K. Ishizu, T. Matsumura, F. Kojima, and N. Ramli, \"Low-altitude-platform-based airborne IoT network (lap-ain) for water quality monitoring in harsh tropical environment,\" IEEE Internet of Things Journal, vol. 9, no. 20, pp. 20034-20054, 2022." + }, + { + "type": "list", + "bbox": [ + 0.089, + 0.795, + 0.492, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.071, + 0.922, + 0.106 + ], + "angle": 0, + "content": "[4] China holds central economic work conference to plan for 2024. Accessed: Dec. 12, 2023. [Online]. Available: https://english.www.gov.cn/news/202312/12/content_WS657860aecd0868f4e8e21c2.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.52, + 0.107, + 0.922, + 0.151 + ], + "angle": 0, + "content": "[5] J. Qiu, D. Grace, G. Ding, M. D. Zakaria, and Q. Wu, \"Air-ground heterogeneous networks for 5g and beyond via integrating high and low altitude platforms,\" IEEE Wireless Communications, vol. 26, no. 6, pp. 140-148, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.152, + 0.922, + 0.195 + ], + "angle": 0, + "content": "[6] H. Ahmadinejad and A. 
Falahati, \"Forming a two-tier heterogeneous air-network via combination of high and low altitude platforms,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 2, pp. 1989-2001, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.196, + 0.921, + 0.242 + ], + "angle": 0, + "content": "[7] N. Hossein Motlagh, T. Taleb, and O. Arouk, \"Low-altitude unmanned aerial vehicles-based internet of things services: Comprehensive survey and future perspectives,\" IEEE Internet of Things Journal, vol. 3, no. 6, pp. 899-922, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.243, + 0.921, + 0.287 + ], + "angle": 0, + "content": "[8] H. Yang, M. Zheng, Z. Shao, Y. Jiang, and Z. Xiong, \"Intelligent computation offloading and trajectory planning for 3d target search in low-altitude economy scenarios,\" IEEE Wireless Communications Letters, pp. 1-1, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.521, + 0.288, + 0.921, + 0.343 + ], + "angle": 0, + "content": "[9] R. Shakeri, M. A. Al-Garadi, A. Badawy, A. Mohamed, T. Khattab, A. K. Al-Ali, K. A. Harras, and M. Guizani, \"Design challenges of multi-uav systems in cyber-physical applications: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3340-3385, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.344, + 0.921, + 0.389 + ], + "angle": 0, + "content": "[10] Y. Zhang, X. Gao, N. Ye, D. Niyato, Z. Han, and K. Yang, \"Joint uav deployment, power allocation, and coalition formation for physical layer security in heterogeneous networks,\" IEEE Transactions on Vehicular Technology, pp. 1-15, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.39, + 0.92, + 0.423 + ], + "angle": 0, + "content": "[11] Z. Liu, Y. Cao, P. Gao, X. Hua, D. Zhang, and T. Jiang, \"Multi-uav network assisted intelligent edge computing: Challenges and opportunities,\" China Communications, vol. 19, no. 3, pp. 258-278, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.424, + 0.921, + 0.469 + ], + "angle": 0, + "content": "[12] Y. Liu, X. Gong, J. Chen, S. Chen, and Y. Yang, \"Rotation-invariant siamese network for low-altitude remote-sensing image registration,\" IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 13, pp. 5746-5758, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.469, + 0.92, + 0.503 + ], + "angle": 0, + "content": "[13] G. Cheng, X. Song, Z. Lyu, and J. Xu, \"Networked isac for low-altitude economy: Coordinated transmit beamforming and UAV trajectory design,\" IEEE Transactions on Communications, pp. 1-1, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.503, + 0.921, + 0.548 + ], + "angle": 0, + "content": "[14] G. Cheng, X. Song, Z. Lyu, and J. Xu, “Networked isac for low-altitude economy: Transmit beamforming and uav trajectory design,” in 2024 IEEE/CIC International Conference on Communications in China (ICCC), 2024, pp. 78-83." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.548, + 0.92, + 0.593 + ], + "angle": 0, + "content": "[15] X. Zheng, G. Sun, J. Li, J. Wang, Q. Wu, D. Niyato, and A. Jamalipour, \"Uav swarm-enabled collaborative post-disaster communications in low altitude economy via a two-stage optimization approach,\" arXiv preprint arXiv:2501.05742, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.594, + 0.921, + 0.627 + ], + "angle": 0, + "content": "[16] China's low-altitude economy soars at high speed. Accessed: Dec. 19, 2024. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.628, + 0.92, + 0.672 + ], + "angle": 0, + "content": "[17] China's low-altitude economy takes flight: A new engine for innovation-driven growth. Accessed: Mar. 17, 2025. [Online]. 
Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.673, + 0.921, + 0.718 + ], + "angle": 0, + "content": "[18] Flying air taxis move closer to us takeoff with issuing of FAA rule. Accessed: Oct. 22, 2024. [Online]. Available: https://www.usnews.com/news/business/articles/2024-10-22/flying-air-taxis-move-closer-to-us-takeoff-with-issuing-of-faa-rule" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.718, + 0.92, + 0.762 + ], + "angle": 0, + "content": "[19] A. Rugo, C. A. Ardagna, and N. E. Ioini, “A security review in the uavnet era: Threats, countermeasures, and gap analysis,” ACM Comput. Surv., vol. 55, no. 1, Jan. 2022. [Online]. Available: https://doi.org/10.1145/3485272" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.763, + 0.92, + 0.808 + ], + "angle": 0, + "content": "[20] X. Wang, Z. Zhao, L. Yi, Z. Ning, L. Guo, F. R. Yu, and S. Guo, \"A survey on security of uav swarm networks: Attacks and countermeasures,\" ACM Comput. Surv., vol. 57, no. 3, Nov. 2024. [Online]. Available: https://doi.org/10.1145/3703625" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.809, + 0.921, + 0.842 + ], + "angle": 0, + "content": "[21] O. Ceviz, S. Sen, and P. Sadioglu, “A survey of security in uavs and fanets: issues, threats, analysis of attacks, and solutions,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.843, + 0.92, + 0.899 + ], + "angle": 0, + "content": "[22] H. J. Hadi, Y. Cao, K. U. Nisa, A. M. Jamil, and Q. Ni, \"A comprehensive survey on security, privacy issues and emerging defence technologies for uavs,\" Journal of Network and Computer Applications, vol. 213, p. 103607, 2023. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1084804523000267" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.9, + 0.921, + 0.945 + ], + "angle": 0, + "content": "[23] V. 
Hassija, V. Chamola, A. Agrawal, A. Goyal, N. C. Luong, D. Niyato, F. R. Yu, and M. Guizani, \"Fast, reliable, and secure drone communication: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 4, pp. 2802-2832, 2021." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.071, + 0.922, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "27" + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.118 + ], + "angle": 0, + "content": "[24] B. Zolfaghari, M. Abbasmollaei, F. Hajizadeh, N. Yanai, and K. Bibak, \"Secure uav (drone) and the great promise of ai,\" ACM Comput. Surv., vol. 56, no. 11, Jul. 2024. [Online]. Available: https://doi.org/10.1145/3673225" + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.118, + 0.492, + 0.152 + ], + "angle": 0, + "content": "[25] X. Wei, J. Ma, and C. Sun, “A survey on security of unmanned aerial vehicle systems: Attacks and countermeasures,” IEEE Internet of Things Journal, vol. 11, no. 21, pp. 34826-34847, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.153, + 0.492, + 0.21 + ], + "angle": 0, + "content": "[26] M. Adil, M. A. Jan, Y. Liu, H. Abulkasim, A. Farouk, and H. Song, \"A systematic survey: Security threats to UAV-aided IoT applications, taxonomy, current challenges and requirements with future research directions,\" IEEE Transactions on Intelligent Transportation Systems, vol. 24, no. 2, pp. 1437-1455, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.21, + 0.492, + 0.256 + ], + "angle": 0, + "content": "[27] N. Kumar and A. Chaudhary, \"Surveying cybersecurity vulnerabilities and countermeasures for enhancing uav security,\" Computer Networks, vol. 252, p. 110695, 2024. [Online]. 
Available: https://www.sciencedirect.com/science/article/pii/S1389128624005279" + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.256, + 0.492, + 0.301 + ], + "angle": 0, + "content": "[28] J. Wang, X. Wang, R. Gao, C. Lei, W. Feng, N. Ge, S. Jin, and T. Q. S. Quek, “Physical layer security for uav communications: A comprehensive survey,” China Communications, vol. 19, no. 9, pp. 77–115, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.301, + 0.492, + 0.358 + ], + "angle": 0, + "content": "[29] A. Fotouhi, H. Qiang, M. Ding, M. Hassan, L. G. Giordano, A. Garcia-Rodriguez, and J. Yuan, \"Survey on uav cellular communications: Practical aspects, standardization advancements, regulation, and security challenges,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3417-3442, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.359, + 0.492, + 0.405 + ], + "angle": 0, + "content": "[30] M. Adil, H. Song, S. Mastorakis, H. Abulkasim, A. Farouk, and Z. Jin, \"Uav-assisted IoT applications, cybersecurity threats, ai-enabled solutions, open challenges with future research directions,\" IEEE Transactions on Intelligent Vehicles, vol. 9, no. 4, pp. 4583-4605, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.405, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[31] W. U. Khan, E. Lagunas, Z. Ali, M. A. Javed, M. Ahmed, S. Chatzinotas, B. Ottersten, and P. Popovski, \"Opportunities for physical layer security in uav communication enhanced with intelligent reflective surfaces,\" IEEE Wireless Communications, vol. 29, no. 6, pp. 22-28, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.462, + 0.492, + 0.508 + ], + "angle": 0, + "content": "[32] J. Wang, H. Du, D. Niyato, M. Zhou, J. Kang, and H. Vincent Poor, \"Acceleration estimation of signal propagation path length changes for wireless sensing,\" IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11476-11492, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.508, + 0.492, + 0.543 + ], + "angle": 0, + "content": "[33] T. Wang, C.-K. Wen, H. Wang, F. Gao, T. Jiang, and S. Jin, \"Deep learning for wireless physical layer: Opportunities and challenges,\" China Communications, vol. 14, no. 11, pp. 92-111, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.543, + 0.492, + 0.588 + ], + "angle": 0, + "content": "[34] J. Wang, H. Du, D. Niyato, J. Kang, S. Cui, X. Shen, and P. Zhang, \"Generative ai for integrated sensing and communication: Insights from the physical layer perspective,\" IEEE Wireless Communications, vol. 31, no. 5, pp. 246-255, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.589, + 0.492, + 0.633 + ], + "angle": 0, + "content": "[35] S. Li, L. Xiao, Y. Liu, G. Liu, P. Xiao, and T. Jiang, \"Performance analysis for orthogonal time frequency space modulation systems with generalized waveform,\" China Communications, vol. 20, no. 4, pp. 57-72, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.634, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[36] N. Xie, W. Xiong, M. Sha, T. Hu, P. Zhang, L. Huang, and D. Niyato, \"Physical layer authentication with high compatibility using an encoding approach,\" IEEE Transactions on Communications, vol. 70, no. 12, pp. 8270-8285, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.68, + 0.492, + 0.737 + ], + "angle": 0, + "content": "[37] S. Liu, T. Wang, and S. Wang, \"Toward intelligent wireless communications: Deep learning - based physical layer technologies,\" Digital Communications and Networks, vol. 7, no. 4, pp. 589-597, 2021. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2352864821000742" + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.738, + 0.492, + 0.784 + ], + "angle": 0, + "content": "[38] Y. Zhang, Y. Peng, X. Tang, L. Xiao, and T. 
Jiang, \"Large-scale fading decoding aided user-centric cell-free massive mimo: Uplink error probability analysis and detector design,\" IEEE Transactions on Wireless Communications, vol. 23, no. 8, pp. 10336-10349, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.784, + 0.492, + 0.83 + ], + "angle": 0, + "content": "[39] H. Du, J. Wang, D. Niyato, J. Kang, Z. Xiong, J. Zhang, and X. Shen, \"Semantic communications for wireless sensing: Ris-aided encoding and self-supervised decoding,\" IEEE Journal on Selected Areas in Communications, vol. 41, no. 8, pp. 2547-2562, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.83, + 0.492, + 0.875 + ], + "angle": 0, + "content": "[40] P. Yang, X. Xi, K. Guo, T. Q. S. Quek, J. Chen, and X. Cao, \"Proactive uav network slicing for urllc and mobile broadband service multiplexing,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3225-3244, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.875, + 0.492, + 0.92 + ], + "angle": 0, + "content": "[41] J. Huang, A. Wang, G. Sun, J. Li, J. Wang, H. Du, and D. Niyato, \"Dual uav cluster-assisted maritime physical layer secure communications via collaborative beamforming,\" IEEE Internet of Things Journal, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.921, + 0.492, + 0.945 + ], + "angle": 0, + "content": "[42] Z. Duan, Z. Chang, N. Xie, W. Sun, and D. T. Niyato, \"Adaptive strategies in enhancing physical layer security: A comprehensive" + }, + { + "type": "list", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.072, + 0.92, + 0.096 + ], + "angle": 0, + "content": "survey,\" ACM Comput. Surv., vol. 57, no. 7, Feb. 2025. [Online]. Available: https://doi.org/10.1145/3715319" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.096, + 0.921, + 0.129 + ], + "angle": 0, + "content": "[43] Q. Wang, Z. Chen, W. 
Mei, and J. Fang, \"Improving physical layer security using uav-enabled mobile relaying,\" IEEE Wireless Communications Letters, vol. 6, no. 3, pp. 310-313, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.129, + 0.921, + 0.185 + ], + "angle": 0, + "content": "[44] S. Liu, H. Yang, M. Zheng, L. Xiao, Z. Xiong, and D. Niyato, “Uav-enabled semantic communication in mobile edge computing under jamming attacks: An intelligent resource management approach,” IEEE Transactions on Wireless Communications, vol. 23, no. 11, pp. 17 493–17 507, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.185, + 0.921, + 0.231 + ], + "angle": 0, + "content": "[45] S. Bi, K. Li, S. Hu, W. Ni, C. Wang, and X. Wang, “Detection and mitigation of position spoofing attacks on cooperative uav swarm formations,” IEEE Transactions on Information Forensics and Security, vol. 19, pp. 1883–1895, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.231, + 0.921, + 0.265 + ], + "angle": 0, + "content": "[46] X. Sun, D. W. K. Ng, Z. Ding, Y. Xu, and Z. Zhong, \"Physical layer security in uav systems: Challenges and opportunities,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 40-47, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.265, + 0.921, + 0.31 + ], + "angle": 0, + "content": "[47] G. Zhang, Q. Hu, Y. Zhang, Y. Dai, and T. Jiang, \"Lightweight cross-domain authentication scheme for securing wireless IoT devices using backscatter communication,\" IEEE Internet of Things Journal, vol. 11, no. 12, pp. 22021-22035, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.31, + 0.921, + 0.344 + ], + "angle": 0, + "content": "[48] Q. Wu, W. Mei, and R. Zhang, \"Safeguarding wireless network with uavs: A physical layer security perspective,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 12-18, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.344, + 0.921, + 0.379 + ], + "angle": 0, + "content": "[49] H.-M. Wang, X. 
Zhang, and J.-C. Jiang, “Uav-involved wireless physical-layer secure communications: Overview and research directions,” IEEE Wireless Communications, vol. 26, no. 5, pp. 32-39, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.378, + 0.921, + 0.412 + ], + "angle": 0, + "content": "[50] B. Li, Z. Fei, Y. Zhang, and M. Guizani, \"Secure uav communication networks over 5g,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 114-120, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.412, + 0.921, + 0.457 + ], + "angle": 0, + "content": "[51] L. Bai, L. Zhu, J. Liu, J. Choi, and W. Zhang, \"Physical layer authentication in wireless communication networks: A survey,\" Journal of Communications and Information Networks, vol. 5, no. 3, pp. 237-264, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.457, + 0.921, + 0.492 + ], + "angle": 0, + "content": "[52] N. Xie, Z. Li, and H. Tan, \"A survey of physical-layer authentication in wireless communications,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 1, pp. 282-310, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.492, + 0.921, + 0.526 + ], + "angle": 0, + "content": "[53] Y. Xu, T. Zhang, D. Yang, Y. Liu, and M. Tao, \"Joint resource and trajectory optimization for security in uav-assisted mec systems,\" IEEE Transactions on Communications, vol. 69, no. 1, pp. 573-588, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.526, + 0.921, + 0.571 + ], + "angle": 0, + "content": "[54] Y. Zhang, Z. Kuang, Y. Feng, and F. Hou, \"Task offloading and trajectory optimization for secure communications in dynamic user multi-uav mec systems,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 14427-14440, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.571, + 0.921, + 0.616 + ], + "angle": 0, + "content": "[55] Y. Zhang, X. Gao, H. Yuan, K. Yang, J. Kang, P. Wang, and D. 
Niyato, \"Joint uav trajectory and power allocation with hybrid fso/rf for secure space-air-ground communications,\" IEEE Internet of Things Journal, vol. 11, no. 19, pp. 31407-31421, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.616, + 0.921, + 0.661 + ], + "angle": 0, + "content": "[56] W. Wang, X. Li, R. Wang, K. Cumanan, W. Feng, Z. Ding, and O. A. Dobre, \"Robust 3d-trajectory and time switching optimization for dual-uav-enabled secure communications,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 11, pp. 3334-3347, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.661, + 0.921, + 0.707 + ], + "angle": 0, + "content": "[57] C. Wen, L. Qiu, and X. Liang, \"Securing uav communication with mobile uav eavesdroppers: Joint trajectory and communication design,\" in 2021 IEEE Wireless Communications and Networking Conference (WCNC), 2021, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.707, + 0.921, + 0.762 + ], + "angle": 0, + "content": "[58] W. Lu, Y. Ding, Y. Gao, S. Hu, Y. Wu, N. Zhao, and Y. Gong, \"Resource and trajectory optimization for secure communications in dual unmanned aerial vehicle mobile edge computing systems,\" IEEE Transactions on Industrial Informatics, vol. 18, no. 4, pp. 2704-2713, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.762, + 0.921, + 0.809 + ], + "angle": 0, + "content": "[59] F. Lu, G. Liu, W. Lu, Y. Gao, J. Cao, N. Zhao, and A. Nallanathan, \"Resource and trajectory optimization for uav-relay-assisted secure maritime mec,\" IEEE Transactions on Communications, vol. 72, no. 3, pp. 1641-1652, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.809, + 0.921, + 0.854 + ], + "angle": 0, + "content": "[60] A. S. Abdalla, A. Behfarnia, and V. Marojevic, \"Uav trajectory and multi-user beamforming optimization for clustered users against passive eavesdropping attacks with unknown csi,\" IEEE Transactions on Vehicular Technology, vol. 72, no. 
11, pp. 14426-14442, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.854, + 0.921, + 0.899 + ], + "angle": 0, + "content": "[61] Y. Ding, H. Han, W. Lu, Y. Wang, N. Zhao, X. Wang, and X. Yang, \"Ddqn-based trajectory and resource optimization for uav-aided mec secure communications,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 4, pp. 6006-6011, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.899, + 0.921, + 0.945 + ], + "angle": 0, + "content": "[62] H. Kang, X. Chang, J. Mišić, V. B. Mišić, J. Fan, and J. Bai, “Improving dual-uav aided ground-uav bi-directional communication security: Joint uav trajectory and transmit power optimization,” IEEE Transactions on Vehicular Technology, vol. 71, no. 10, pp. 10570–10583, 2022." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.072, + 0.921, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "28" + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.118 + ], + "angle": 0, + "content": "[63] Y. Zhang, Z. Mou, F. Gao, J. Jiang, R. Ding, and Z. Han, \"Uav-enabled secure communications by multi-agent deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 10, pp. 11599-11611, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.119, + 0.492, + 0.164 + ], + "angle": 0, + "content": "[64] Y. Liu, C. Huang, G. Chen, R. Song, S. Song, and P. Xiao, “Deep learning empowered trajectory and passive beamforming design in uav-ris enabled secure cognitive non-terrestrial networks,” IEEE Wireless Communications Letters, vol. 13, no. 1, pp. 188–192, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.165, + 0.492, + 0.22 + ], + "angle": 0, + "content": "[65] J. Wang, R. Wang, Z. Zheng, R. Lin, L. Wu, and F. 
Shu, \"Physical layer security enhancement in uav-assisted cooperative jamming for cognitive radio networks: A mappo-lstm deep reinforcement learning approach,\" IEEE Transactions on Vehicular Technology, pp. 1-14, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.222, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[66] X. Tang, N. Liu, R. Zhang, and Z. Han, \"Deep learning-assisted secure uav-relaying networks with channel uncertainties,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 5, pp. 5048-5059, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.258, + 0.492, + 0.292 + ], + "angle": 0, + "content": "[67] X. Li, R. Yao, Y. Fan, P. Wang, and J. Xu, \"Secure efficiency map-enabled uav trajectory planning,\" IEEE Wireless Communications Letters, vol. 12, no. 8, pp. 1324-1328, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.293, + 0.492, + 0.339 + ], + "angle": 0, + "content": "[68] R. Karmakar, G. Kaddoum, and O. Akhrif, “A novel federated learning-based smart power and 3d trajectory control for fairness optimization in secure uav-assisted mec services,” IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 4832–4848, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.34, + 0.492, + 0.374 + ], + "angle": 0, + "content": "[69] Z. Li, X. Liao, J. Shi, L. Li, and P. Xiao, “Md-gan-based uav trajectory and power optimization for cognitive covert communications,” IEEE Internet of Things Journal, vol. 9, no. 12, pp. 10187-10199, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.375, + 0.492, + 0.42 + ], + "angle": 0, + "content": "[70] S. Jia, L. Xiaomeng, L. Xiaomin, T. Zhuangzhuang, and H. Junfan, \"Covert leo satellite communication aided by generative adversarial network based cooperative uav jamming,\" China Communications, vol. 21, no. 9, pp. 27-39, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.421, + 0.492, + 0.467 + ], + "angle": 0, + "content": "[71] C. 
Zhang, G. Sun, J. Li, Q. Wu, J. Wang, D. Niyato, and Y. Liu, \"Multi-objective aerial collaborative secure communication optimization via generative diffusion model-enabled deep reinforcement learning,\" IEEE Transactions on Mobile Computing, pp. 1-18, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.468, + 0.492, + 0.513 + ], + "angle": 0, + "content": "[72] T. Alladi, Naren, G. Bansal, V. Chamola, and M. Guizani, \"Secauthuav: A novel authentication scheme for uav-ground station and uav-uav communication,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 15068-15077, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.514, + 0.492, + 0.56 + ], + "angle": 0, + "content": "[73] R. Karmakar, G. Kaddoum, and O. Akhrif, \"A puf and fuzzy extractor-based uav-ground station and uav-uav authentication mechanism with intelligent adaptation of secure sessions,\" IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 3858-3875, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.561, + 0.492, + 0.606 + ], + "angle": 0, + "content": "[74] M. Tanveer, A. Aldosary, S.-u.-d. Khokhar, A. K. Das, S. A. Aldossari, and S. A. Chaudhry, “Paf-iod: Puf-enabled authentication framework for the internet of drones,” IEEE Transactions on Vehicular Technology, vol. 73, no. 7, pp. 9560–9574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.607, + 0.492, + 0.653 + ], + "angle": 0, + "content": "[75] S. J. Maeng, Y. Yapici, i. Guvenc, A. Bhuyan, and H. Dai, “Precoder design for physical-layer security and authentication in massive mimo uav communications,” IEEE Transactions on Vehicular Technology, vol. 71, no. 3, pp. 2949–2964, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.654, + 0.492, + 0.699 + ], + "angle": 0, + "content": "[76] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, B. Vucetic, and P. 
Fan, \"A uav-aided physical layer authentication based on channel characteristics and geographical locations,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 1, pp. 1053–1064, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.7, + 0.492, + 0.735 + ], + "angle": 0, + "content": "[77] Y. Zhou, Y. Wang, Z. Ma, P. Fan, and M. Xiao, \"Physical layer authentication for uav communications under rayleigh and rician channels,\" IEEE Transactions on Wireless Communications, pp. 1-1, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.736, + 0.492, + 0.769 + ], + "angle": 0, + "content": "[78] Y.-S. Shiu, S. Y. Chang, H.-C. Wu, S. C.-H. Huang, and H.-H. Chen, \"Physical layer security in wireless networks: a tutorial,\" IEEE Wireless Communications, vol. 18, no. 2, pp. 66-74, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.77, + 0.492, + 0.804 + ], + "angle": 0, + "content": "[79] J. Xu, D. Li, Z. Zhu, Z. Yang, N. Zhao, and D. Niyato, “Anti-jamming design for integrated sensing and communication via aerial iris,” IEEE Transactions on Communications, vol. 72, no. 8, pp. 4607–4619, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.806, + 0.492, + 0.851 + ], + "angle": 0, + "content": "[80] B. Duo, Q. Wu, X. Yuan, and R. Zhang, “Anti-jamming 3d trajectory design for uav-enabled wireless sensor networks under probabilistic loss channel,” IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 16288-16293, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.852, + 0.492, + 0.897 + ], + "angle": 0, + "content": "[81] Y. Wu, W. Yang, X. Guan, and Q. Wu, \"Energy-efficient trajectory design for uav-enabled communication under malicious jamming,\" IEEE Wireless Communications Letters, vol. 10, no. 2, pp. 206-210, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.898, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[82] Y. Wu, W. Yang, X. Guan, and Q. 
Wu, \"Uav-enabled relay communication under malicious jamming: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 70, no. 8, pp. 8275-8279, 2021." + }, + { + "type": "list", + "bbox": [ + 0.083, + 0.072, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.071, + 0.92, + 0.106 + ], + "angle": 0, + "content": "[83] M. A. Aref, S. K. Jayaweera, and S. Machuzak, \"Multi-agent reinforcement learning based cognitive anti-jamming,\" in 2017 IEEE Wireless Communications and Networking Conference (WCNC), 2017, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.107, + 0.92, + 0.152 + ], + "angle": 0, + "content": "[84] L. Jia, F. Yao, Y. Sun, Y. Xu, S. Feng, and A. Anpalagan, “A hierarchical learning solution for anti-jamming stackelberg game with discrete power strategies,” IEEE Wireless Communications Letters, vol. 6, no. 6, pp. 818–821, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.153, + 0.92, + 0.197 + ], + "angle": 0, + "content": "[85] X. Liu, Y. Xu, L. Jia, Q. Wu, and A. Anpalagan, “Anti-jamming communications using spectrum waterfall: A deep reinforcement learning approach,” IEEE Communications Letters, vol. 22, no. 5, pp. 998–1001, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.198, + 0.92, + 0.255 + ], + "angle": 0, + "content": "[86] H. Yang, Z. Xiong, J. Zhao, D. Niyato, Q. Wu, H. V. Poor, and M. Tornatore, \"Intelligent reflecting surface assisted anti-jamming communications: A fast reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 20, no. 3, pp. 1963-1974, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.255, + 0.92, + 0.3 + ], + "angle": 0, + "content": "[87] Z. Yin, Y. Lin, Y. Zhang, Y. Qian, F. Shu, and J. 
Li, \"Collaborative multiagent reinforcement learning aided resource allocation for uav anti-jamming communication,\" IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23995-24008, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.301, + 0.92, + 0.347 + ], + "angle": 0, + "content": "[88] Y. Ma, K. Liu, Y. Liu, X. Wang, and Z. Zhao, \"An intelligent game-based anti-jamming solution using adversarial populations for aerial communication networks,\" IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.348, + 0.92, + 0.393 + ], + "angle": 0, + "content": "[89] Z. Shao, H. Yang, L. Xiao, W. Su, Y. Chen, and Z. Xiong, \"Deep reinforcement learning-based resource management for uav-assisted mobile edge computing against jamming,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 13358-13374, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.394, + 0.92, + 0.438 + ], + "angle": 0, + "content": "[90] Y. Zhou, P. L. Yeoh, K. J. Kim, Z. Ma, Y. Li, and B. Vucetic, \"Game theoretic physical layer authentication for spoofing detection in uav communications,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 6, pp. 6750-6755, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.439, + 0.92, + 0.485 + ], + "angle": 0, + "content": "[91] Q. Cheng, Y. Zhou, H. Liu, L. Yang, Z. Ma, and P. Fan, \"Physical layer authentication in uav communications with channel randomness and jamming uncertainty,\" IEEE Transactions on Vehicular Technology, pp. 1-6, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.485, + 0.92, + 0.519 + ], + "angle": 0, + "content": "[92] A. Eldosouky, A. Ferdowsi, and W. Saad, “Drones in distress: A game-theoretic countermeasure for protecting uavs against gps spoofing,” IEEE Internet of Things Journal, vol. 7, no. 4, pp. 2840–2854, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.52, + 0.92, + 0.554 + ], + "angle": 0, + "content": "[93] D. She, W. Wang, Z. Yin, J. Wang, and H. Shan, \"Gps spoofing attack recognition for uavs with limited samples,\" IEEE Internet of Things Journal, vol. 12, no. 1, pp. 250-261, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.555, + 0.92, + 0.599 + ], + "angle": 0, + "content": "[94] Y. Dang, C. Benzaid, B. Yang, T. Taleb, and Y. Shen, \"Deep-ensemble-learning-based gps spoofing detection for cellular-connected uavs,\" IEEE Internet of Things Journal, vol. 9, no. 24, pp. 25068-25085, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.6, + 0.92, + 0.645 + ], + "angle": 0, + "content": "[95] X. Wang, J. Wang, Y. Xu, J. Chen, L. Jia, X. Liu, and Y. Yang, \"Dynamic spectrum anti-jamming communications: Challenges and opportunities,\" IEEE Communications Magazine, vol. 58, no. 2, pp. 79-85, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.646, + 0.92, + 0.69 + ], + "angle": 0, + "content": "[96] L. Zhang, G. Ding, Q. Wu, and Z. Han, \"Spectrum sensing under spectrum misuse behaviors: A multi-hypothesis test perspective,\" IEEE Transactions on Information Forensics and Security, vol. 13, no. 4, pp. 993-1007, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.691, + 0.92, + 0.726 + ], + "angle": 0, + "content": "[97] S. C. Hassler, U. A. Mughal, and M. Ismail, “Cyber-physical intrusion detection system for unmanned aerial vehicles,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 6, pp. 6106–6117, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.727, + 0.92, + 0.772 + ], + "angle": 0, + "content": "[98] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"An emergent self-awareness module for physical layer security in cognitive uav radios,\" IEEE Transactions on Cognitive Communications and Networking, vol. 8, no. 2, pp. 888-906, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.773, + 0.92, + 0.817 + ], + "angle": 0, + "content": "[99] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"Automatic jamming signal classification in cognitive uav radios,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 12, pp. 12972-12988, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.818, + 0.92, + 0.864 + ], + "angle": 0, + "content": "[100] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, “A novel resource allocation for anti-jamming in cognitive-uavs: An active inference approach,” IEEE Communications Letters, vol. 26, no. 10, pp. 2272–2276, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.864, + 0.92, + 0.91 + ], + "angle": 0, + "content": "[101] D. Darsena, G. Gelli, I. Iudice, and F. Verde, “Detection and blind channel estimation for uav-aided wireless sensor networks in smart cities under mobile jamming attack,” IEEE Internet of Things Journal, vol. 9, no. 14, pp. 11932–11950, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.91, + 0.92, + 0.944 + ], + "angle": 0, + "content": "[102] L. Zhang, G. Ding, Q. Wu, and P. Liu, “Detection of abnormal power emission in uav communication networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1179–1182, 2019." + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.071, + 0.92, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "29" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.118 + ], + "angle": 0, + "content": "[103] T. M. Hoang, N. M. Nguyen, and T. Q. Duong, “Detection of eavesdropping attack in uav-aided wireless systems: Unsupervised learning with one-class svm and k-means clustering,” IEEE Wireless Communications Letters, vol. 9, no. 2, pp. 139–142, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.118, + 0.492, + 0.163 + ], + "angle": 0, + "content": "[104] Y. An, R. Kang, Y. Ban, and S. Yang, “Beidou receiver based on anti-jamming antenna arrays with self-calibration for precise relative positioning,” Journal of Systems Engineering and Electronics, vol. 35, no. 5, pp. 1132–1147, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.163, + 0.492, + 0.231 + ], + "angle": 0, + "content": "[105] H. Sathaye and A. Ranganathan, “Semperfi: a psychoer eliminating standalone gps receiver,” in Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks, ser. WiSec '20. New York, NY, USA: Association for Computing Machinery, 2020, p. 353–355. [Online]. Available: https://doi.org/10.1145/3395351.3401703" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.231, + 0.492, + 0.265 + ], + "angle": 0, + "content": "[106] H. Sathaye, G. LaMountain, P. Closas, and A. Ranganathan, “Semperfi: Anti-spoofing gps receiver for uavs,” in Network and Distributed Systems Security (NDSS) Symposium 2022, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.265, + 0.492, + 0.299 + ], + "angle": 0, + "content": "[107] S. Han, L. Chen, W. Meng, and C. Li, \"Improve the security of gnsss receivers through spoofing mitigation,\" IEEE Access, vol. 5, pp. 21057-21069, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.299, + 0.492, + 0.333 + ], + "angle": 0, + "content": "[108] X. Ye, Y. Mao, X. Yu, S. Sun, L. Fu, and J. Xu, \"Integrated sensing and communications for low-altitude economy: A deep reinforcement learning approach,\" arXiv preprint arXiv:2412.04074, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.333, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[109] C. Huang, S. Fang, H. Wu, Y. Wang, and Y. 
Yang, \"Low-altitude intelligent transportation: System architecture, infrastructure, and key technologies,\" Journal of Industrial Information Integration, vol. 42, p. 100694, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2452414X24001377" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.39, + 0.492, + 0.434 + ], + "angle": 0, + "content": "[110] Y. Yang, Y. Chen, J. Wang, G. Sun, and D. Niyato, \"Embodied aiempowered low altitude economy: Integrated sensing, communications, computation, and control (isc3),\" arXiv preprint arXiv:2412.19996, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.435, + 0.492, + 0.48 + ], + "angle": 0, + "content": "[111] J. Li, G. Sun, Q. Wu, S. Liang, J. Wang, D. Niyato, and D. I. Kim, \"Aerial secure collaborative communications under eavesdropper collusion in low-altitude economy: A generative swarm intelligent approach,\" arXiv preprint arXiv:2503.00721, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.481, + 0.492, + 0.514 + ], + "angle": 0, + "content": "[112] G. Sun, W. Xie, D. Niyato, H. Du, J. Kang, J. Wu, S. Sun, and P. Zhang, \"Generative ai for advanced uav networking,\" IEEE Network, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.514, + 0.492, + 0.559 + ], + "angle": 0, + "content": "[113] X. Tang, X. Li, R. Yu, Y. Wu, J. Ye, F. Tang, and Q. Chen, \"Digital-twin-assisted task assignment in multi-uav systems: A deep reinforcement learning approach,\" IEEE Internet of Things Journal, vol. 10, no. 17, pp. 15362-15375, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.559, + 0.492, + 0.605 + ], + "angle": 0, + "content": "[114] X. Tang, Q. Chen, R. Yu, and X. Li, \"Digital twin-empowered task assignment in aerial mec network: A resource coalition cooperation approach with generative model,\" IEEE Transactions on Network Science and Engineering, vol. 12, no. 1, pp. 13-27, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.606, + 0.492, + 0.65 + ], + "angle": 0, + "content": "[115] Y. Jiang, X. Li, G. Zhu, H. Li, J. Deng, and Q. Shi, \"6g non-terrestrial networks enabled low-altitude economy: Opportunities and challenges,\" ArXiv, vol. abs/2311.09047, 2023. [Online]. Available: https://api_semanticscholar.org/CorpusID:265213350" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.65, + 0.492, + 0.684 + ], + "angle": 0, + "content": "[116] X. Luo, Y. Zhang, Z. He, G. Yang, and Z. Ji, \"A two-step environment-learning-based method for optimal uav deployment,\" IEEE Access, vol. 7, pp. 149328-149340, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.684, + 0.492, + 0.729 + ], + "angle": 0, + "content": "[117] X. Tang, Q. Chen, W. Weng, B. Liao, J. Wang, X. Cao, and X. Li, \"Dnn task assignment in uav networks: A generative ai enhanced multi-agent reinforcement learning approach,\" IEEE Internet of Things Journal, pp. 1-1, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.729, + 0.492, + 0.785 + ], + "angle": 0, + "content": "[118] H. Yang, J. Zhao, Z. Xiong, K.-Y. Lam, S. Sun, and L. Xiao, \"Privacy-preserving federated learning for uav-enabled networks: Learning-based joint scheduling and resource management,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3144-3159, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.786, + 0.492, + 0.842 + ], + "angle": 0, + "content": "[119] X. Cai, T. Izydorczyk, J. Rodríguez-Pineiro, I. Z. Kovács, J. Wigard, F. M. L. Tavares, and P. E. Mogensen, \"Empirical low-altitude air-to-ground spatial channel characterization for cellular networks connectivity,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 2975-2991, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.843, + 0.492, + 0.887 + ], + "angle": 0, + "content": "[120] Y. Zhao, F. Zhou, L. Feng, W. Li, Y. Sun, and M. A. 
Imran, \"Backhaul-constrained coverage analysis of integrated high and low altitude platforms aerial communication system in post-disaster areas,\" IEEE Communications Letters, vol. 27, no. 6, pp. 1629-1633, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.888, + 0.492, + 0.945 + ], + "angle": 0, + "content": "[121] S. H. Alsamhi, F. A. Almalki, F. Afghah, A. Hawbani, A. V. Shvetsov, B. Lee, and H. Song, \"Drones' edge intelligence over smart environments in b5g: Blockchain and federated learning synergy,\" IEEE Transactions on Green Communications and Networking, vol. 6, no. 1, pp. 295-312, 2022." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.071, + 0.492, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.118 + ], + "angle": 0, + "content": "[122] A. Ahmad, A. A. Cheema, and D. Finlay, \"A survey of radio propagation channel modelling for low altitude flying base stations,\" Computer Networks, vol. 171, p. 107122, 2020. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128619310692" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.921, + 0.152 + ], + "angle": 0, + "content": "[123] I. Bozcan and E. Kayacan, \"Context-dependent anomaly detection for low altitude traffic surveillance,\" in 2021 IEEE International Conference on Robotics and Automation (ICRA), 2021, pp. 224-230." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.152, + 0.921, + 0.197 + ], + "angle": 0, + "content": "[124] Y. Liu, X. Gong, and Y. Yang, \"A multilayer fusion network with rotation-invariant and dynamic feature representation for multiview low-altitude image registration,\" IEEE Geoscience and Remote Sensing Letters, vol. 18, no. 6, pp. 1019-1023, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.197, + 0.921, + 0.232 + ], + "angle": 0, + "content": "[125] A. Omri and M. O. 
Hasna, \"Physical layer security analysis of uav based communication networks,\" in 2018 IEEE 88th Vehicular Technology Conference (VTC-Fall), 2018, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.232, + 0.921, + 0.265 + ], + "angle": 0, + "content": "[126] S. Samonas and D. Coss, “The cia strikes back: Redefining confidentiality, integrity and availability in security.” Journal of Information System Security, vol. 10, no. 3, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.265, + 0.921, + 0.31 + ], + "angle": 0, + "content": "[127] C. Zhao, H. Du, D. Niyato, J. Kang, Z. Xiong, D. I. Kim, X. Shen, and K. B. Letaief, \"Generative ai for secure physical layer communications: A survey,\" IEEE Transactions on Cognitive Communications and Networking, vol. 11, no. 1, pp. 3-26, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.31, + 0.921, + 0.355 + ], + "angle": 0, + "content": "[128] J. M. Hamamreh, H. M. Furqan, and H. Arslan, \"Classifications and applications of physical layer security techniques for confidentiality: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1773-1828, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.355, + 0.921, + 0.389 + ], + "angle": 0, + "content": "[129] M. Shakiba-Herfeh, A. Chorti, and H. Vincent Poor, “Physical layer security: Authentication, integrity, and confidentiality,” Physical layer security, pp. 129–150, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.921, + 0.434 + ], + "angle": 0, + "content": "[130] S. Hu, Q. Wu, and X. Wang, \"Energy management and trajectory optimization for uav-enabled legitimate monitoring systems,\" IEEE Transactions on Wireless Communications, vol. 20, no. 1, pp. 142-155, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.435, + 0.921, + 0.469 + ], + "angle": 0, + "content": "[131] D. Wang, B. Bai, W. Zhao, and Z. 
Han, “A survey of optimization approaches for wireless physical layer security,” IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1878–1911, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.469, + 0.921, + 0.514 + ], + "angle": 0, + "content": "[132] M. A. Arfaoui, M. D. Soltani, I. Tavakkolnia, A. Ghrayeb, M. Safari, C. M. Assi, and H. Haas, \"Physical layer security for visible light communication systems: A survey,\" IEEE Communications Surveys & Tutorials, vol. 22, no. 3, pp. 1887-1908, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.514, + 0.921, + 0.559 + ], + "angle": 0, + "content": "[133] Z. Yin, M. Jia, N. Cheng, W. Wang, F. Lyu, Q. Guo, and X. Shen, \"Uav-assisted physical layer security in multi-beam satellite-enabled vehicle communications,\" IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 3, pp. 2739-2751, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.559, + 0.921, + 0.605 + ], + "angle": 0, + "content": "[134] X. Fang, N. Zhang, S. Zhang, D. Chen, X. Sha, and X. Shen, \"On physical layer security: Weighted fractional fourier transform based user cooperation,\" IEEE Transactions on Wireless Communications, vol. 16, no. 8, pp. 5498-5510, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.605, + 0.921, + 0.65 + ], + "angle": 0, + "content": "[135] W. Tian, X. Ding, G. Liu, Y. Dai, and Z. Han, “A uav-assisted secure communication system by jointly optimizing transmit power and trajectory in the internet of things,” IEEE Transactions on Green Communications and Networking, vol. 7, no. 4, pp. 2025–2037, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.65, + 0.921, + 0.706 + ], + "angle": 0, + "content": "[136] F. Irram, M. Ali, M. Naeem, and S. Mumtaz, \"Physical layer security for beyond 5g/6g networks: Emerging technologies and future directions,\" Journal of Network and Computer Applications, vol. 206, p. 103431, 2022. [Online]. 
Available: https://www.sciencedirect.com/science/article/pii/S108480452200087X" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.706, + 0.921, + 0.763 + ], + "angle": 0, + "content": "[137] W. Lu, P. Si, F. Lu, B. Li, Z. Liu, S. Hu, and Y. Gong, \"Resource and trajectory optimization in uav-powered wireless communication system,\" Science China Information Sciences, vol. 64, no. 4, p. 140304, Mar 2021, accessed: 2025-01-03. [Online]. Available: https://doi.org/10.1007/s11432-020-3060-4" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.763, + 0.921, + 0.808 + ], + "angle": 0, + "content": "[138] J. Luo, Z. Wang, M. Xia, L. Wu, Y. Tian, and Y. Chen, \"Path planning for uav communication networks: Related technologies, solutions, and opportunities,\" ACM Comput. Surv., vol. 55, no. 9, Jan. 2023. [Online]. Available: https://doi-org.remotexs.ntu.edu.sg/10.1145/3560261" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.808, + 0.921, + 0.854 + ], + "angle": 0, + "content": "[139] A. V. Savkin, H. Huang, and W. Ni, “Securing uav communication in the presence of stationary or mobile eavesdroppers via online 3d trajectory planning,” IEEE Wireless Communications Letters, vol. 9, no. 8, pp. 1211–1215, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.854, + 0.921, + 0.899 + ], + "angle": 0, + "content": "[140] X. Zhou, Q. Wu, S. Yan, F. Shu, and J. Li, \"Uav-enabled secure communications: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 68, no. 4, pp. 4069-4073, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.921, + 0.945 + ], + "angle": 0, + "content": "[141] R. Ding, F. Gao, and X. S. Shen, \"3d uav trajectory design and frequency band allocation for energy-efficient and fair communication: A deep reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 19, no. 12, pp. 7796-7809, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.921, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.907, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "30" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[142] C. Zhong, J. Yao, and J. Xu, \"Secure uav communication with cooperative jamming and trajectory control,\" IEEE Communications Letters, vol. 23, no. 2, pp. 286-289, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.107, + 0.492, + 0.152 + ], + "angle": 0, + "content": "[143] Y. Bai, H. Zhao, X. Zhang, Z. Chang, R. Jantti, and K. Yang, \"Toward autonomous multi-uav wireless network: A survey of reinforcement learning-based approaches,\" IEEE Communications Surveys & Tutorials, vol. 25, no. 4, pp. 3038-3067, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.153, + 0.492, + 0.198 + ], + "angle": 0, + "content": "[144] R. Dong, B. Wang, K. Cao, J. Tian, and T. Cheng, \"Secure transmission design of ris enabled uav communication networks exploiting deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 6, pp. 8404-8419, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.199, + 0.492, + 0.243 + ], + "angle": 0, + "content": "[145] X. Tang, T. Jiang, J. Liu, B. Li, D. Zhai, F. R. Yu, and Z. Han, \"Secure communication with uav-enabled aerial ris: Learning trajectory with reflection optimization,\" IEEE Transactions on Intelligent Vehicles, pp. 1-10, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.245, + 0.492, + 0.29 + ], + "angle": 0, + "content": "[146] J. Duan, Y. Guan, S. E. Li, Y. Ren, Q. Sun, and B. Cheng, \"Distribu-tional soft actor-critic: Off-policy reinforcement learning for addressing value estimation errors,\" IEEE Transactions on Neural Networks and Learning Systems, vol. 33, no. 11, pp. 6584-6598, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.291, + 0.492, + 0.335 + ], + "angle": 0, + "content": "[147] W. Chen, X. Qiu, T. Cai, H.-N. Dai, Z. Zheng, and Y. Zhang, “Deep reinforcement learning for internet of things: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 23, no. 3, pp. 1659–1692, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.336, + 0.492, + 0.382 + ], + "angle": 0, + "content": "[148] F. Tang, H. Hofner, N. Kato, K. Kaneko, Y. Yamashita, and M. Hangai, “A deep reinforcement learning-based dynamic traffic offloading in space-air-ground integrated networks (sagin),” IEEE Journal on Selected Areas in Communications, vol. 40, no. 1, pp. 276–289, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.383, + 0.492, + 0.428 + ], + "angle": 0, + "content": "[149] N. Yang, S. Chen, H. Zhang, and R. Berry, “Beyond the edge: An advanced exploration of reinforcement learning for mobile edge computing, its applications, and future research trajectories,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.429, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[150] Q. Mao, F. Hu, and Q. Hao, “Deep learning for intelligent wireless networks: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 20, no. 4, pp. 2595–2621, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.463, + 0.492, + 0.508 + ], + "angle": 0, + "content": "[151] P. Consul, I. Budhiraja, and D. Garg, \"A hybrid secure resource allocation and trajectory optimization approach for mobile edge computing using federated learning based on web 3.0,\" IEEE Transactions on Consumer Electronics, vol. 70, no. 1, pp. 1167-1179, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.508, + 0.492, + 0.543 + ], + "angle": 0, + "content": "[152] X. Hou, J. Wang, Z. Zhang, J. Wang, L. Liu, and Y. 
Ren, \"Split federated learning for uav-enabled integrated sensing, computation, and communication,\" arXiv preprint arXiv:2504.01443, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.544, + 0.492, + 0.587 + ], + "angle": 0, + "content": "[153] K. Heo, W. Lee, and K. Lee, “Uav-assisted wireless-powered secure communications: Integration of optimization and deep learning,” IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 10530–10545, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.589, + 0.492, + 0.623 + ], + "angle": 0, + "content": "[154] U. A. Mughal, Y. Alkhrijah, A. Almadhor, and C. Yuen, “Deep learning for secure uav-assisted ris communication networks,” IEEE Internet of Things Magazine, vol. 7, no. 2, pp. 38-44, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.624, + 0.492, + 0.657 + ], + "angle": 0, + "content": "[155] R. Dong, B. Wang, and K. Cao, \"Deep learning driven 3d robust beamforming for secure communication of uav systems,\" IEEE Wireless Communications Letters, vol. 10, no. 8, pp. 1643-1647, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.659, + 0.492, + 0.702 + ], + "angle": 0, + "content": "[156] M. Chen, U. Challita, W. Saad, C. Yin, and M. Debbah, \"Artificial neural networks-based machine learning for wireless networks: A tutorial,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3039-3071, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.704, + 0.492, + 0.748 + ], + "angle": 0, + "content": "[157] M. T. Nguyen and L. B. Le, “Multi-uav trajectory control, resource allocation, and nomai user pairing for uplink energy minimization,” IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23 728–23 740, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.749, + 0.492, + 0.794 + ], + "angle": 0, + "content": "[158] X. Liao, J. Shi, Z. Li, L. Zhang, and B. 
Xia, “A model-driven deep reinforcement learning heuristic algorithm for resource allocation in ultra-dense cellular networks,” IEEE Transactions on Vehicular Technology, vol. 69, no. 1, pp. 983–997, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.795, + 0.492, + 0.84 + ], + "angle": 0, + "content": "[159] X. Liao, J. Si, J. Shi, Z. Li, and H. Ding, \"Generative adversarial network assisted power allocation for cooperative cognitive covert communication system,\" IEEE Communications Letters, vol. 24, no. 7, pp. 1463-1467, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.841, + 0.492, + 0.886 + ], + "angle": 0, + "content": "[160] Y. Zhou, P. L. Yeoh, H. Chen, Y. Li, R. Schober, L. Zhuo, and B. Vucetic, \"Improving physical layer security via a uav friendly jammer for unknown eavesdropper location,\" IEEE Transactions on Vehicular Technology, vol. 67, no. 11, pp. 11280-11284, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.887, + 0.492, + 0.921 + ], + "angle": 0, + "content": "[161] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P.-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, vol. 36, no. 7, pp. 2814–2830, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.922, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[162] D. Chen, N. Zhang, N. Cheng, K. Zhang, Z. Qin, and X. Shen, \"Physical layer based message authentication with secure channel" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.072, + 0.921, + 0.095 + ], + "angle": 0, + "content": "codes,\" IEEE Transactions on Dependable and Secure Computing, vol. 17, no. 5, pp. 1079-1093, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.096, + 0.921, + 0.129 + ], + "angle": 0, + "content": "[163] G. Bansal and B. 
Sikdar, “S-maps: Scalable mutual authentication protocol for dynamic uav swarms,” IEEE Transactions on Vehicular Technology, vol. 70, no. 11, pp. 12088-12100, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.13, + 0.921, + 0.173 + ], + "angle": 0, + "content": "[164] B. Chatterjee, D. Das, S. Maity, and S. Sen, \"Rf-puf: Enhancing iot security through authentication of wireless nodes using in-situ machine learning,\" IEEE Internet of Things Journal, vol. 6, no. 1, pp. 388-398, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.175, + 0.921, + 0.219 + ], + "angle": 0, + "content": "[165] G. Bansal, N. Naren, V. Chamola, B. Sikdar, N. Kumar, and M. Guizani, \"Lightweight mutual authentication protocol for v2g using physical unclonable function,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 7, pp. 7234-7246, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.22, + 0.921, + 0.264 + ], + "angle": 0, + "content": "[166] C. Pu, A. Wall, K.-K. R. Choo, I. Ahmed, and S. Lim, \"A lightweight and privacy-preserving mutual authentication and key agreement protocol for internet of drones environment,\" IEEE Internet of Things Journal, vol. 9, no. 12, pp. 9918-9933, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.265, + 0.921, + 0.32 + ], + "angle": 0, + "content": "[167] Z. Zhang, C. Hsu, M. H. Au, L. Harn, J. Cui, Z. Xia, and Z. Zhao, \"Prlap-iod: A puf-based robust and lightweight authentication protocol for internet of drones,\" Computer Networks, vol. 238, p. 110118, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128623005637" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.321, + 0.921, + 0.355 + ], + "angle": 0, + "content": "[168] J. Liu and X. Wang, \"Physical layer authentication enhancement using two-dimensional channel quantization,\" IEEE Transactions on Wireless Communications, vol. 15, no. 6, pp. 4171-4182, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.356, + 0.921, + 0.389 + ], + "angle": 0, + "content": "[169] X. Lu, J. Lei, Y. Shi, and W. Li, \"Improved physical layer authentication scheme based on wireless channel phase,\" IEEE Wireless Communications Letters, vol. 11, no. 1, pp. 198-202, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.921, + 0.423 + ], + "angle": 0, + "content": "[170] N. Xie, J. Chen, and L. Huang, “Physical-layer authentication using multiple channel-based features,” IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2356-2366, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.424, + 0.921, + 0.469 + ], + "angle": 0, + "content": "[171] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, and B. Vucetic, \"Signal-to-noise ratio based physical layer authentication in uav communications,\" in 2023 IEEE 34th Annual International Symposium on Personal, Indoor and Mobile Radio Communications (PIMRC), 2023, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.47, + 0.921, + 0.513 + ], + "angle": 0, + "content": "[172] Y. Shang, Y. Peng, R. Ye, and J. Lee, “Ris-assisted secure uav communication scheme against active jamming and passive eavesdropping,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 11, pp. 16953-16963, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.515, + 0.921, + 0.558 + ], + "angle": 0, + "content": "[173] Y. Wu, X. Guan, W. Yang, and Q. Wu, “Uav swarm communication under malicious jamming: Joint trajectory and clustering design,” IEEE Wireless Communications Letters, vol. 10, no. 10, pp. 2264–2268, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.559, + 0.921, + 0.604 + ], + "angle": 0, + "content": "[174] Z. Shen, K. Xu, and X. Xia, \"Beam-domain anti-jamming transmission for downlink massive mimo systems: A stackelberg game perspective,\" IEEE Transactions on Information Forensics and Security, vol. 16, pp. 
2727-2742, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.605, + 0.921, + 0.639 + ], + "angle": 0, + "content": "[175] X. Li, J. Chen, X. Ling, and T. Wu, “Deep reinforcement learning-based anti-jamming algorithm using dual action network,” IEEE Transactions on Wireless Communications, vol. 22, no. 7, pp. 4625–4637, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.639, + 0.921, + 0.672 + ], + "angle": 0, + "content": "[176] L. Jia, N. Qi, F. Chu, S. Fang, X. Wang, S. Ma, and S. Feng, \"Game-theoretic learning anti-jamming approaches in wireless networks,\" IEEE Communications Magazine, vol. 60, no. 5, pp. 60-66, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.673, + 0.921, + 0.706 + ], + "angle": 0, + "content": "[177] F. Yao and L. Jia, “A collaborative multi-agent reinforcement learning anti-jamming algorithm in wireless networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1024–1027, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.707, + 0.921, + 0.75 + ], + "angle": 0, + "content": "[178] E. Schmidt, N. Gatsis, and D. Akopian, “A gps spoofing detection and classification correlator-based technique using the lasso,” IEEE Transactions on Aerospace and Electronic Systems, vol. 56, no. 6, pp. 4224–4237, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.751, + 0.921, + 0.786 + ], + "angle": 0, + "content": "[179] B. Pardhasaradhi and L. R. Cenkeramaddi, \"Gps spoofing detection and mitigation for drones using distributed radar tracking and fusion,\" IEEE Sensors Journal, vol. 22, no. 11, pp. 11 122-11 134, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.786, + 0.921, + 0.819 + ], + "angle": 0, + "content": "[180] Z. Chen, J. Li, J. Li, X. Zhu, and C. Li, \"Gnss multiparameter spoofing detection method based on support vector machine,\" IEEE Sensors Journal, vol. 22, no. 18, pp. 17864-17874, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.82, + 0.921, + 0.864 + ], + "angle": 0, + "content": "[181] X. Chen, D. He, X. Yan, W. Yu, and T.-K. Truong, \"Gnss interference type recognition with fingerprint spectrum dnn method,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 58, no. 5, pp. 4745-4760, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.865, + 0.921, + 0.91 + ], + "angle": 0, + "content": "[182] Y. Dang, C. Benzaïd, Y. Shen, and T. Taleb, \"Gps spoofing detector with adaptive trustable residence area for cellular based-uavs,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.91, + 0.921, + 0.944 + ], + "angle": 0, + "content": "[183] V. Chandola, A. Banerjee, and V. Kumar, \"Anomaly detection: A survey,\" ACM Comput. Surv., vol. 41, no. 3, Jul. 2009. [Online]. Available: https://doi.org/10.1145/1541880.1541882" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.072, + 0.921, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.906, + 0.032, + 0.92, + 0.041 + ], + "angle": 0, + "content": "31" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.106 + ], + "angle": 0, + "content": "[184] B. Balaji and K. Friston, \"Bayesian state estimation using generalized coordinates,\" Signal processing, sensor fusion, and target recognition XX, vol. 8050, pp. 716-727, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.108, + 0.492, + 0.153 + ], + "angle": 0, + "content": "[185] M. Baydoun, D. Campo, V. Sanguineti, L. Marcenaro, A. Cavallaro, and C. Regazzoni, “Learning switching models for abnormality detection for autonomous driving,” in 2018 21st International Conference on Information Fusion (FUSION), 2018, pp. 2606–2613." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.154, + 0.492, + 0.176 + ], + "angle": 0, + "content": "[186] L. 
Pardo, Statistical inference based on divergence measures. Chapman and Hall/CRC, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.178, + 0.492, + 0.223 + ], + "angle": 0, + "content": "[187] A. Krayani, M. Baydoun, L. Marcenaro, A. S. Alam, and C. Regazzoni, \"Self-learning bayesian generative models for jammer detection in cognitive-uav-radios,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-7." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.224, + 0.492, + 0.269 + ], + "angle": 0, + "content": "[188] W. Xie, G. Sun, J. Wang, H. Du, J. Kang, K. Huang, and V. Leung, “Multi-objective aerial iris-assisted isac optimization via generative ai-enhanced deep reinforcement learning,” arXiv preprint arXiv:2502.10687, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.27, + 0.492, + 0.305 + ], + "angle": 0, + "content": "[189] J. Wang, H. Du, Y. Liu, G. Sun, D. Niyato, S. Mao, D. I. Kim, and X. Shen, \"Generative ai based secure wireless sensing for isac networks,\" arXiv preprint arXiv:2408.11398, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.306, + 0.492, + 0.34 + ], + "angle": 0, + "content": "[190] X. Wang, C. P. Tan, Y. Wang, and X. Wang, “Defending uav networks against covert attacks using auxiliary signal injections,” IEEE Transactions on Automation Science and Engineering, pp. 1–13, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.341, + 0.492, + 0.385 + ], + "angle": 0, + "content": "[191] M. Valkama, M. Renfors, and V. Koivunen, “Advanced methods for i/q imbalance compensation in communication receivers,” IEEE Transactions on Signal Processing, vol. 49, no. 10, pp. 2335–2344, 2001." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.387, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[192] J. Zhang and Y. R. 
Zheng, \"Frequency-domain turbo equalization with soft successive interference cancellation for single carrier mimo underwater acoustic communications,\" IEEE Transactions on Wireless Communications, vol. 10, no. 9, pp. 2872-2882, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.434, + 0.492, + 0.479 + ], + "angle": 0, + "content": "[193] P. Madhani, P. Axelrad, K. Krumvieda, and J. Thomas, \"Application of successive interference cancellation to the gps pseudolite near-far problem,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 39, no. 2, pp. 481-488, 2003." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.48, + 0.492, + 0.514 + ], + "angle": 0, + "content": "[194] P. Patel and J. Holtzman, \"Analysis of a simple successive interference cancellation scheme in a ds/cdma system,\" IEEE Journal on Selected Areas in Communications, vol. 12, no. 5, pp. 796-807, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.515, + 0.492, + 0.538 + ], + "angle": 0, + "content": "[195] M. L. Psiaki and T. E. Humphreys, “Gnss spoofing and detection,” Proceedings of the IEEE, vol. 104, no. 6, pp. 1258–1270, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.539, + 0.492, + 0.573 + ], + "angle": 0, + "content": "[196] T. E. Humphreys, “Detection strategy for cryptographic gnss anti-spoofing,” IEEE Transactions on Aerospace and Electronic Systems, vol. 49, no. 2, pp. 1073–1090, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.574, + 0.492, + 0.618 + ], + "angle": 0, + "content": "[197] Z. Wu, R. Liu, and H. Cao, \"Ecdsa-based message authentication scheme for beidou-ii navigation satellite system,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 55, no. 4, pp. 1666-1682, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.62, + 0.492, + 0.655 + ], + "angle": 0, + "content": "[198] K. Wesson, M. Rothlisberger, and T. 
Humphreys, “Practical cryptographic civilgps signal authentication,” NAVIGATION: Journal of the Institute of Navigation, vol. 59, no. 3, pp. 177–193, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.656, + 0.492, + 0.723 + ], + "angle": 0, + "content": "[199] A. Ranganathan, H. Olafsdóttir, and S. Capkun, \"Spree: a spoofing resistant gps receiver,\" in Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking, ser. MobiCom '16. New York, NY, USA: Association for Computing Machinery, 2016, p. 348-360. [Online]. Available: https://doi.org/10.1145/2973750.2973753" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.725, + 0.492, + 0.77 + ], + "angle": 0, + "content": "[200] M. Ahmed, A. A. Soofi, S. Raza, F. Khan, S. Ahmad, W. U. Khan, M. Asif, F. Xu, and Z. Han, “Advancements in ris-assisted UAV for empowering multiaccess edge computing: A survey,” IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6325–6346, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.771, + 0.492, + 0.805 + ], + "angle": 0, + "content": "[201] G. K. Pandey, D. S. Gurjar, S. Yadav, Y. Jiang, and C. Yuen, “Uav-assisted communications with rf energy harvesting: A comprehensive survey,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.806, + 0.492, + 0.863 + ], + "angle": 0, + "content": "[202] P. Cao, L. Lei, S. Cai, G. Shen, X. Liu, X. Wang, L. Zhang, L. Zhou, and M. Guizani, \"Computational intelligence algorithms for uav swarm networking and collaboration: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 26, no. 4, pp. 2684-2728, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.864, + 0.492, + 0.91 + ], + "angle": 0, + "content": "[203] P. Li, H. Zhang, Y. Wu, L. Qian, R. Yu, D. Niyato, and X. 
Shen, \"Filling the missing: Exploring generative ai for enhanced federated learning over heterogeneous mobile edge devices,\" IEEE Transactions on Mobile Computing, vol. 23, no. 10, pp. 10001-10015, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.91, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[204] J. Wang, Y. Liu, H. Du, D. Niyato, J. Kang, H. Zhou, and D. I. Kim, \"Empowering wireless networks with artificial intelligence generated graph,\" arXiv preprint arXiv:2405.04907, 2024." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.072, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.072, + 0.921, + 0.117 + ], + "angle": 0, + "content": "[205] M. Xu, D. Niyato, J. Kang, Z. Xiong, S. Mao, Z. Han, D. I. Kim, and K. B. Letaief, \"When large language model agents meet 6g networks: Perception, grounding, and alignment,\" IEEE Wireless Communications, vol. 31, no. 6, pp. 63-71, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.921, + 0.162 + ], + "angle": 0, + "content": "[206] R. Zhang, H. Du, D. Niyato, J. Kang, Z. Xiong, P. Zhang, and D. I. Kim, \"Optimizing generative ai networking: A dual perspective with multi-agent systems and mixture of experts,\" arXiv preprint arXiv:2405.12472, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.163, + 0.921, + 0.207 + ], + "angle": 0, + "content": "[207] A. H. Arani, P. Hu, and Y. Zhu, “Uav-assisted space-air-ground integrated networks: A technical review of recent learning algorithms,” IEEE Open Journal of Vehicular Technology, vol. 5, pp. 1004–1023, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.208, + 0.921, + 0.265 + ], + "angle": 0, + "content": "[208] N. T. T. Van, N. L. Tuan, N. C. Luong, T. H. Nguyen, S. Feng, S. Gong, D. Niyato, and D. I. 
Kim, \"Network access selection for urclc and embb applications in sub-6ghz-mmwave-thz networks: Game theory versus multi-agent reinforcement learning,\" IEEE Transactions on Communications, pp. 1-1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.265, + 0.921, + 0.31 + ], + "angle": 0, + "content": "[209] Q. Yuan, L. Xiao, C. He, P. Xiao, and T. Jiang, \"Deep learning-based hybrid precoding for ris-aided broadband terahertz communication systems in the face of beam squint,\" IEEE Wireless Communications Letters, vol. 13, no. 2, pp. 303-307, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.311, + 0.921, + 0.366 + ], + "angle": 0, + "content": "[210] G. Geraci, A. Garcia-Rodriguez, M. M. Azari, A. Lozano, M. Mezzavilla, S. Chatzinotas, Y. Chen, S. Rangan, and M. D. Renzo, \"What will the future of uav cellular communications be? a flight from 5g to 6g,\" IEEE Communications Surveys & Tutorials, vol. 24, no. 3, pp. 1304-1335, 2022." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.072, + 0.921, + 0.366 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_origin.pdf b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..61077fd37b5ab670a6df212f1b171eff95254927 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/041c4cb9-df29-4fd7-8ac4-53350c684566_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4c1518661a4829f17f55a6117070db73aed01ddef8e34125b1d820e8c678a64 +size 1396270 diff --git a/data/2025/2504_09xxx/2504.09153/full.md b/data/2025/2504_09xxx/2504.09153/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5eb135e836f99a76553eebf0ec226653042955c4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/full.md @@ -0,0 +1,818 @@ +# Secure Physical Layer Communications for Low-Altitude 
Economy Networking: A Survey + +Lingyi Cai, Jiacheng Wang, Ruichen Zhang, Yu Zhang, Tao Jiang, Fellow, IEEE, Dusit Niyato, Fellow, IEEE, Xianbin Wang, Fellow, IEEE, Abbas Jamalipour, Fellow, IEEE, and Xuemin Shen, Fellow, IEEE + +Abstract—The Low-Altitude Economy Networking (LAENet) is emerging as a transformative paradigm that enables an integrated and sophisticated communication infrastructure to support aerial vehicles in carrying out a wide range of economic activities within low-altitude airspace. However, the physical layer communications in the LAENet face growing security threats due to inherent characteristics of aerial communication environments, such as signal broadcast nature and channel openness. These challenges highlight the urgent need for safeguarding communication confidentiality, availability, and integrity. In view of the above, this survey comprehensively reviews existing secure countermeasures for physical layer communication in the LAENet. We explore core methods focusing on anti-eavesdropping and authentication for ensuring communication confidentiality. Subsequently, availability-enhancing techniques are thoroughly discussed for anti-jamming and spoofing defense. Then, we review approaches for safeguarding integrity through anomaly detection and injection protection. Furthermore, we discuss future research directions, emphasizing energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. This survey may provide valuable references and new insights for researchers in the field of secure physical layer communication for the LAENet. + +Index Terms—Low-altitude economy networking, secure physical layer communications, communication confidentiality, communication availability, communication integrity. + +# I. 
INTRODUCTION + +WITH the rapid development of aerial vehicle technologies and communication networks, the concept of Low-Altitude Economic Networking (LAENet) has emerged to enable more comprehensive, large-scale, and intelligent + +Lingyi Cai is with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China, and also with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: lingyicai@hust.edu.cn). + +Jiacheng Wang, Ruichen Zhang, and Dusit Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mails: jiacheng.wang@ntu.edu.sg; ruichen.zhang@ntu.edu.sg; dniyato@ntu.edu.sg). + +Yu Zhang and Tao Jiang are with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China (e-mail: yuzhang123@hust.edu.cn; tao.jiang@ieee.org). + +Xianbin Wang is with the Department of Electrical and Computer Engineering, Western University, London, ON, N6A 5B9, Canada (e-mail: xianbin.wang@uwo.ca). + +Abbas Jamalipour is with the School of Electrical and Computer Engineering, University of Sydney, Australia (e-mail: a.jamalipour@ieee.org). + +Xuemin Shen is with the Department of Electrical and Computer Engineering, University of Waterloo, Waterloo, ON N2L 3G1, Canada (e-mail: sshen@uwaterloo.ca). + +connectivity to support various low-altitude activities [1]–[4], such as intelligent transportation, logistics delivery, communication enhancement, disaster monitoring, and emergency response [5]–[8], as shown in Fig. 1. The LAENet is built upon earlier frameworks of single Unmanned Aerial Vehicle (UAV) operation and multi-UAV networks. 
A single UAV typically maintains a direct link to a ground station or base station, operating with simple control procedures and delivering cost-effective services but with limited range and scalability [9]. The UAV network focuses on formation control and multi-UAV collaboration, enabling broader mission areas and stronger fault tolerance [9]–[11]. Advancing from these foundations, the LAENet integrates various aerial vehicles into a high-density communication network, connecting them not only to ground stations but also to other platforms such as base stations, access points, and even satellites [12], [13]. Thus, the LAENet can enable ubiquitous coverage, high reliability, robust fault tolerance, greater autonomy, and intelligence. + +Specifically, the LAENet refers to an integrated network system that connects various low-altitude flight operations, including general aviation, drones, electric vertical take-off and landing (eVTOL) aircraft, and other aerial platforms, within the designated low-altitude airspace (typically below 1,000 meters, and in some cases extending up to 3,000 meters) [1], [13]. The LAENet serves as a vital bridge between ground-based economies and airspace resources, which will drive technological innovation and unlock substantial social and economic benefits [14], [15]. The Civil Aviation Administration of China estimates that the country's low-altitude market will soar from 500 billion Chinese yuan (about 70 billion US dollars) in 2023 to 1.5 trillion Chinese yuan (about 200 billion US dollars) in 2025 and as much as 3.5 trillion Chinese yuan (about 480 billion US dollars) in 2035 [16]. Currently, research institutions and enterprises across multiple regions in China are continuously advancing and expanding innovative research and commercial applications of UAVs and eVTOLs in low-altitude activities [17]. 
Meanwhile, in the United States, the Federal Aviation Administration has confirmed its commitment to actively promoting the development of electric air taxis and integrating this type of aircraft into the national airspace [18]. + +In the LAENet, physical layer communication serves as a critical foundation for wireless communication between aerial vehicles and between aerial vehicles and communication infrastructure [10], [28], [31]. The physical layer converts digital data from higher protocol layers into signals suitable for transmission over aerial communication channels [32]–[34]. + +![](images/bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg) +Fig. 1. The overall architecture of the LAENet covers the main application scenarios, including emergency monitoring and response, temporary communication relay, communication coverage expansion, low-altitude smart logistics, and urban air mobility. The table compares the similarities and differences between the LAENet, single UAV, and UAV networks, representing the evolution of the LAENet. + +This process encompasses encoding data into bit sequences, modulating them onto carrier waves, and ensuring reliable signal propagation through the wireless medium [32], [35], [36]. At the receiver side, the physical layer performs inverse operations, including demodulating the incoming signals, decoding the bit sequences, and passing the data to upper layers for further processing [37]–[39]. Therefore, the physical layer supports the core communication mechanisms in the LAENet and plays a crucial role in its aerial deployment. For example, aerial vehicles deployed as aerial base stations (ABSs) or aerial relays can overcome interference, signal distortion, and environmental variations inherent in communication links by using physical layer functionalities such as channel access, multiplexing, and channel equalization [33], [40], [41]. 
+ +However, physical layer communication in the LAENet is exposed to a variety of security threats due to the inherent characteristics of aerial communication environments [42]. The broadcast nature of wireless signals and the prevalence of line-of-sight (LoS) propagation make aerial links particularly vulnerable to eavesdropping, jamming, and spoofing attacks [1], [43]. These attacks can compromise communication confidentiality, disrupt communication, or deceive aerial vehicles by impersonating legitimate transmitters [44], [45]. Furthermore, the openness of wireless channels and weak authentication mechanisms increase the risk of unauthorized access and injection attacks, allowing adversaries to infiltrate the network or inject malicious signals [46], [47]. Additionally, the open medium and dynamic spectrum access may cause anomalous behaviors to disrupt normal communication operations in the LAENet [48], [49]. + +Confronted with these substantial security challenges, this paper conducts a comprehensive analysis on physical layer communications of the LAENet and provides a thorough survey of technologies and solutions to address communication confidentiality, availability, and integrity. Table II gives a clear structure for showing existing efforts on secure physical layer communications for the LAENet. + +# A. Related Surveys + +Recently, a number of excellent survey and tutorial papers have overviewed security issues in UAV networks and communications and have summarized corresponding countermeasures and solutions, as shown in Table I. Some works consider security issues at the system level including intrusion, privacy, and trust issues. The work in [19] provides a comprehensive review of security threats facing UAVs and UAV networks, including communication vulnerabilities, sensor spoofing, jamming, and malware attacks. It examines various countermeasures such as encryption, global positioning system (GPS) spoofing mitigation, and firmware signing. 
A gap analysis is performed to identify remaining security vulnerabilities and provide recommendations for future UAV development. The study in [20] conducts a comprehensive review of security issues in UAV swarm networks, examining various potential attacks such as communication attacks, identity-based attacks, resource attacks, routing attacks, data attacks, and machine learning (ML) attacks. It categorizes these threats and presents corresponding security technologies and countermeasures, including cryptography, physical layer security techniques, blockchain, machine learning, and intrusion detection

TABLE I SUMMARY OF RELATED SURVEYS

ReferencesFocus
[19]A review of cybersecurity threats, countermeasures, and research gaps in UAV networks, with a focus on emerging attack surfaces and commercial UAV applications
[20]A survey of security threats, vulnerabilities, and countermeasures in UAV swarm networks, with a focus on classifying attack types and reviewing emerging defense technologies
[21]A review of security threats, vulnerabilities, and countermeasures in UAVs and Flying Ad Hoc Networks with attack surface analysis with simulation-based evaluation
[22]A survey of vulnerabilities across software, hardware, and communication layers in UAV systems, and an exploration of emerging defense technologies
[23]A survey of security challenges in drone communication and a review of emerging technologies used to enhance the speed, reliability, and security of UAV networks
[24]A review of UAV security challenges, existing controls, and future research directions, with an emphasis on the transformative role of AI in enabling secure UAV systems
[25]A review of security threats classified from a cyberspace security perspective and countermeasures in UAV systems
[26]A survey of security threats, requirements, and counter-measures in UAV-aided Internet of Things (IoT) applications
[27]A survey of cybersecurity vulnerabilities and countermeasures in UAV systems, integrating threat classification, communication protocols, and emerging techniques
[28]A survey of PLS in UAV communications, focusing on key challenges, methodologies, and recent advancements for both static and mobile UAV deployment scenarios
[29]A review of security challenges, practical deployment aspects, and standardization progress associated with integrating UAVs into cellular networks
[30]A survey of layer-wise cybersecurity threats and AI-enabled countermeasures in UAV-assisted IoT applications
+ +systems. The authors in [21] provide a detailed examination of security challenges in UAVs and FANETs, covering various attack vectors including communication, identity-based, resource, routing, data, and machine learning attacks. The study in [22] examines security and privacy vulnerabilities in UAV systems across hardware, software, and communication layers. It discusses various threats such as eavesdropping and jamming attacks, and presents defense mechanisms including blockchain, machine learning-based intrusion detection, and secure communication protocols. + +Some studies emphasize cyber security challenges within UAV networks. The study in [23] comprehensively reviews security issues in drone communication, including Denial of Service (DoS), GPS spoofing, and man-in-the-middle attacks. It examines vulnerabilities across different drone applications and presents countermeasures using blockchain, software-defined networks, machine learning, and fog computing. The authors of [24] provide a comprehensive survey of security challenges in UAV systems, including various types of attacks, privacy concerns, and trust issues. It identifies current research trends and gaps while establishing a future roadmap with a focus on artificial intelligence (AI)'s potential to enhance UAV security. The authors in [25] provide a comprehensive review of security issues in UAV networks, examining various potential attacks such as spoofing, replay, jamming, and + +# TABLE II + +# CHALLENGES AND SOLUTIONS + +RED CIRCLES DESCRIBE THE SECURITY ISSUES; GREEN CIRCLES REPRESENT THE OVERALL COUNTERMEASURES FOR THE SECURITY ISSUES; GREEN CHECK MARKERS INDICATE DIFFERENT TYPES OF SOLUTIONS UNDER EACH COUNTERMEASURE + +
Section III, Challenge 1: Communication confidentiality
Issues● Eavesdropping attack [46], [50]● Unauthorized access [31], [51], [52]
Solutions● Anti-eavesdropping strategies√ Convex optimization-based strategies [53]–[59]√ Reinforcement learning-based strategies [60]–[65]√ Deep learning-based strategies [66]–[71]● Communication authentication√ PUFs-based authentication [72]–[74]√ Channel based-authentication [75]–[77]
Section IV, Challenge 2: Communication availability
Issues● Jamming attack [48], [78], [79]● Spoofing attack [49], [50], [52], [78]
Solutions● Anti-jamming strategies√ Convex optimization [80]–[82]√ Single-agent RL [83]–[86]√ Multi-agent RL [87]–[89]● Spoofing defense√ PLA [77], [90], [91]√ GNSS spoofing detection [92]–[94]
Section V, Challenge 3: Communication Integrity
Issues● Anomalous behaviors [61], [95], [96]● Injection attacks [28], [46], [97]
Solutions● Anomaly detection√ Jamming anomaly detection [98]–[101]√ Abnormal power detection [102]√ Eavesdropping anomaly detection [103]● Injection defense√ Jamming signal injection defense [98], [101], [104]√ Spoofing signal injection defense [105]–[107]
+ +eavesdropping attacks. It categorizes these threats and presents corresponding security technologies and countermeasures. The study in [26] provides a comprehensive review of security issues in UAV-aided IoT applications and presents corresponding security technologies and countermeasures. The work in [27] reviews cybersecurity threats affecting UAV systems and evaluates existing countermeasures in enhancing UAV security. + +In addition, some surveys analyze the challenges faced by UAV systems from a layered perspective (e.g., physical layer, link layer, network layer, application layer). The work in [28] deeply reviews the current state of physical layer security (PLS) in UAV communications, examining unique air-to-ground channel characteristics, static and mobile UAV deployment scenarios, and various security enhancement techniques. The work in [29] presents a comprehensive overview of UAV cellular communications, covering the classification of consumer drones, the concept and potential of UAV-mounted flying base stations. It explores the integration of UAVs into cellular networks as novel user equipment and addresses key challenges related to interference, regulatory compliance, and security. The authors of [30] review the cybersecurity landscape of UAV-assisted IoT applications, examining layer-wise security threats from physical to application layers. It explores how AI, ML, deep learning (DL), and reinforcement learning (RL) techniques have been employed to address authentication, data privacy, and attack prevention challenges. + +TABLE III +LIST OF ABBREVIATIONS + +
AbbreviationDescriptionAbbreviationDescription
A2GAir-to-groundABSAerial Base Station
ANArtificial NoiseAIArtificial Intelligence
BCDBlock Coordinate DescentBSBase Station
CNNConvolutional Neural NetworkCSIChannel State Information
DDPGDeep Deterministic Policy GradientDDQNDouble-deep Q-Learning
DLDeep LearningDNNDeep Neural Network
DQNDeep Q-NetworkeVTOLElectric Vertical Take-off and Landing
DRLDeep Reinforcement LearningFARFalse Alarm Rate
G2AGround-to-airG2UGround-to-UAV
GANGenerative Adversarial NetworkGNSSGlobal Navigation Satellite System
GPSGlobal Positioning SystemGSGround Station
IoTInternet of ThingsLAENetLow-Altitude Economy Networking
LSTMLong Short-Term MemoryLoSLine-of-sight
MARLMulti-agent Reinforcement LearningMDPMarkov Decision Process
MDRMiss Detection RateMECMobile Edge Computing
MLMachine LearningMSEMean Square Error
NOMANon-orthogonal Multiple AccessPLAPhysical-layer Authentication
PLSPhysical Layer SecurityPUFPhysical Unclonable Function
QoEQuality of ExperienceRFRadio Frequency
RISReconfigurable Intelligent SurfacesRLReinforcement Learning
RNNRecurrent Neural NetworkRSSReceived Signal Strength
SCASuccessive Convex ApproximationSDNRSignal-to-disturbance-plus-noise Ratio
SNRSignal-to-noise RatioSOCSecond-Order Cone
TDMATime-division Multiple AccessTHzTerahertz
U2GUAV-to-ground CommunicationUAVUnmanned Aerial Vehicle
+ +# B. Contributions of Our Survey + +The related surveys and tutorials primarily focus on the classification of overall security threats and corresponding countermeasures in UAV networks or UAV-assisted applications, with relatively little attention given to security issues of communication in the physical layer. Different from existing studies, our survey uniquely concentrates on the security challenges specific to physical layer communications in the LAENet, as summarized in Table II. It fills a critical gap in the literature by conducting an in-depth analysis of threats in physical layer communications that were previously underexplored or only briefly mentioned in prior studies. By offering a comprehensive and systematic analysis of these underexplored issues, our work brings new insights to seek effective solutions to enhance physical layer security in communications of the LAENet. The key contributions of this paper are summarized as follows: + +The key contributions of this paper are summarized as follows: + +- A thorough discussion of the six main security issues in the physical layer communication of the LAENet is presented, namely, eavesdropping attack, unauthorized access, jamming attack, spoofing attack, anomalous behaviors, and injection attack. We analyze these attacks in the context of their potential occurrence throughout the entire operation of LAENet, providing essential references for ensuring the security of physical layer communication in the future LAENet deployments. +- We review countermeasures against various attacks in detail and offer a comprehensive tutorial on achieving communication confidentiality, communication availability, and communication integrity in LAENet. In addition, + +the lessons learned for each security issue are presented to emphasize the limitations of existing works and provide high-level insights for improvements. 
+ +- Several potential future research directions for secure physical layer communication in LAENet are proposed, including energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. These diverse perspectives offer new guidance for future research on secure physical layer communication in LAENet. + +The remainder of this paper is given as follows. Section II introduces the background of the LAENet and security issues in physical layer communication of the LAENet. In Section III, a comprehensive exploration of achieving communication confidentiality for the LAENet is presented. Section IV reviews the solutions for communication availability in the LAENet. In Section V, countermeasures on communication integrity for the LAENet are discussed. Section VI provides future research directions, and Section VII concludes this paper. Additionally, Table III lists the abbreviations commonly employed throughout this survey. + +# II. BACKGROUND KNOWLEDGE + +In this section, we introduce the background of the LAENet, including its definition and application scenarios. Subsequently, the concept of physical layer communication in the LAENet and its security threats are elaborated in detail. + +# A. Background of LAENet + +The LAENet is a sophisticated and dynamic system that integrates various aerial and terrestrial technologies to en + +![](images/1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg) +Fig. 2. Background knowledge of the LAENet and security issues in its physical layer communication. Describe the definition of the LAENet and its communication application scenarios. 
Elaborate on three key metrics for secure physical layer communication: communication confidentiality, which combats eavesdropping attacks and unauthorized access; anti-jamming strategies and spoofing defense for ensuring communication availability; and anomaly detection and injection defense to prevent adversaries from compromising communication integrity. + +able seamless communication, coordination, and management of diverse aerial operations within low-altitude airspace [1], [108]. The LAENet includes numerous different types of constituents, such as flight equipment, base stations, and other communication platforms. Specifically, the LAENet connects various aerial vehicles, including general aviation aircraft for passenger transport and emergency rescue, drones for surveillance and logistics, and eVTOL designed for urban air mobility and last-mile cargo delivery [109], [110]. These aerial vehicles can incorporate ground and aerial base stations, further high-altitude platforms, such as weather balloons and satellites, to receive environmental information and precise navigation [13]. + +Different from traditional aviation networks that rely on centralized air traffic control, the LAENet can independently construct communication and networking by seamlessly interconnecting a variety of aerial and ground-based systems, which enables continuous information exchange, flight path optimization, and autonomous operations [8], [111]. Therefore, the LAENet has opened opportunities for various application scenarios and plays key roles from the perspective of communication coverage and relay [112]–[114]. Specifically, the LAENet can extend the communication coverage by deploying aircraft as ABSs in areas lacking communication infrastructure [115]–[117]. For instance, these ABSs deployed at optimal altitudes can provide connectivity and network services in remote or disaster-stricken areas [118], [119]. 
Moreover, if the direct communication links between ground base stations and user equipment are unreliable, such as in + +mountainous regions and densely populated areas, the aircraft can act as mobile relays to improve connectivity by capturing, amplifying, and transmitting communication signals [120]–[122]. It also can be regarded as a surveillance unit to monitor airspace dynamics while simultaneously functioning as a low-altitude network orchestrator to optimize communication and computing resources [118], [123], [124]. + +To integrate and evolve these capabilities, the LAENet needs to establish effective communication infrastructure to ensure reliable connectivity and efficient interaction across various environments [31], [125]. Physical layer communication, as the bottom layer in the network architecture, may directly influence the communication performance of the LAENet across aerial and terrestrial networks [43], [46]. For example, it governs how signals are generated, transmitted, and received between aircraft and base stations [31]. Building on this, it manages the channel and spectrum resources to enhance signal transmission quality and maintain stable connectivity [43]. Therefore, ensuring the security of physical layer communication in the LAENet is crucial for supporting a wide range of applications in low-altitude domains. + +# B. Security Issues in Physical Layer Communication of LAENet + +Based on previous studies [126], [127], we discuss the security issues in the physical layer communication of the LAENet from three aspects: confidentiality, availability, and + +integrity of communications. The details of each measurement are described as follows. + +- The confidentiality of physical layer communications in the LAENet can be compromised by security threats such as eavesdropping and unauthorized access [128]. 
Eavesdropping arises primarily from the broadcast nature of wireless signals and LoS link, making transmissions highly susceptible to interception [46]. An eavesdropper silently capturing or intercepting signals can lead to the exposure of confidential information. Meanwhile, unauthorized access threats exploit the open and broadcast nature of UAV communications [31]. Attackers may gain illegal access to the LAENet by disguising themselves as legitimate UAVs or ground stations, thereby deceiving or interfering with the normal operation of UAVs [51]. +- Similarly, the open nature of wireless channels and LoS propagation bring jamming and spoofing security issues for communication availability [78]. Specifically, jammers can continuously transmit interference signals to disrupt communication, where a jammer can be a drone or a base station [48]. The spoofing attack can not only achieve identity spoofing by forging legitimate transmission identities but also launch signal deception attacks to disrupt UAV communications and positioning [49]. Therefore, jamming and spoofing lead to unauthorized access and signal disruptions or errors, making communication unavailable in the LAENet. +- Integrity as a microscopic metric measures the deviations of signals, channels, and spectrum in communication under adversaries' influence [129]. The communication integrity of the LAENet can be affected by anomalous behaviors and injection attacks. Anomalous behaviors often use dynamic spectrum access and the open wireless medium, including abnormal jamming, abnormal transmission power, and covert eavesdropping [95]. These anomalous behaviors can introduce harmful interference, violate spectrum policies, and expose sensitive information to eavesdroppers [61], [96]. 
Moreover, the injection attack exploits the open nature of wireless channels to alter signals or inject illegal signals, such as spoofing signals or malicious GNSS signals, to deceive receivers and interfere with communication, thereby leading to degraded signal quality, false navigation, and network congestion [28], [46], [97].

Overall, as illustrated in Fig. 2, this survey reviews existing research on achieving communication confidentiality, availability, and integrity for the LAENet. Specifically, the investigation of anti-eavesdropping strategies and communication authentication schemes aims to enhance communication confidentiality. Studies on anti-jamming techniques and spoofing defense mechanisms have been explored to ensure communication availability. Furthermore, research on communication integrity has focused on anomaly detection and injection attack mitigation approaches.

# III. COMMUNICATION CONFIDENTIALITY FOR LAENET

# A. Anti-eavesdropping Strategy

The LAENet faces significant eavesdropping threats due to the inherent vulnerabilities of UAV-enabled wireless communications. The openness of wireless channels, especially the LoS links in air-to-ground (A2G) and ground-to-air (G2A) communications, increases susceptibility to interception by eavesdroppers that disrupt legitimate communications compared to traditional terrestrial channels [50]. Traditional cryptographic methods, while effective in many scenarios, are less suitable for UAV communications due to their computational complexity and the dynamic mobility of UAVs [130]. This highlights the critical need for robust security measures to ensure the confidentiality and reliability of the LAENet communications. To address these limitations, leveraging PLS techniques to counter eavesdropping threats effectively has emerged as a promising solution [131]–[134]. 
+ +In the LAENet, anti-eavesdropping solutions can leverage the controllable mobility of low-altitude aircraft to enhance physical layer security. By dynamically optimizing their trajectories, low-altitude aircraft can actively adapt their flight paths to shape the communication environment [135]. This approach allows them to fly closer to legitimate ground nodes, strengthening communication links and improving channel conditions for intended receivers, while simultaneously distancing themselves from potential eavesdroppers. In this subsection, we present a critical role of UAV trajectory in forming the communication environment, and how PLS can be enhanced through trajectory optimization and resource allocation to mitigate eavesdropping risks. Our analysis focuses on three prominent methodologies in this domain: convex optimization, deep learning, and reinforcement learning. + +Convex optimization plays a crucial role in addressing anti-eavesdropping challenges in UAV-enabled communication networks, particularly for solving the joint optimization of trajectory and resource allocation [137]. Due to the inherent non-convex nature of these problems, advanced convex optimization techniques such as Successive Convex Approximation (SCA) and Block Coordinate Descent (BCD) are widely utilized [135]. These methods enable UAVs to enhance physical layer security by optimizing flight paths and resource utilization, minimizing the risk of eavesdropping while ensuring secure and efficient communication. Additionally, the decision variables may be discrete, which requires the application of various relaxation methods to transform the complex optimization problem into a more tractable form to obtain efficient solutions [138]. + +The study in [53] explores physical-layer security in UAV-assisted Mobile Edge Computing (MEC) systems in the presence of multiple ground-based eavesdroppers. The proposed system utilizes dual UAVs for task execution and anti-eavesdropping measures. 
One UAV operates as a mobile MEC server, while the other emits jamming signals to disrupt eavesdroppers, as shown in Fig. 3. The time-division multiple access (TDMA) scheme and non-orthogonal multiple access (NOMA) scheme are proposed to maximize the minimum secure computing capacity by jointly optimizing communica + +TABLE IV SUMMARY OF CONVEX OPTIMIZATION FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
RefOptimization ObjectivesEavesdropper and Jammer TypeOptimizationConstraintsPros & Cons
[53]Secure calculation capacity1UAV jammer and fixed ground eavesdropperTransmit power, time allocation, and computation capacityBCD and P-BCD for secure calculation capacity maximization +✓ Secure capacity of NOMA and TDMA has been significantly improved +X High complexity for NOMA due to dual-loop iterations
[54]Secure calculation capacityBase station jammer and fixed ground eavesdropperTransmission power, time allocation, and CPU processing frequencyJDPB algorithm with SCA and BCD for secure task offloading +✓ Reduce complexity via region division +X Fixed UAV altitude limits 3D trajectory optimization
[55]Average secrecy rate2Antenna jammer and fixed aerial eavesdropperTransmit power and jamming powerBCD and SCA optimization with hybrid FSO/RF links +✓ Enhance communication security via hybrid FSO/RF links and AN +X Rely on simplified channel models (e.g., free-space path loss)
[56]Worst-case secrecy rateUAV jammer and fixed ground eavesdropperUAV speed, collision avoidance, positioning error, and energy harvestingRobust 3D trajectory and time switching optimization +✓ Full mobility of UAVs in 3D for improving secrecy rate +X The performance may degrade with flying eavesdroppers
[57]Average secrecy rateNone and flying eavesdropperTransmit power control and user schedulingJoint trajectory and communication design against mobile eavesdroppers +✓ Initial trajectory design for keeping away from eavesdroppers +X Security performance relies on the initial trajectory design
[58]Secure calculation capacityGround jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceIntegrate a dual-UAV system with a ground jammer in MEC +✓ Incorporate the UAV server and UAV eavesdropper with a ground jammer +✓ Allow a UAV server to hover near ground users for secure offloading +X Numerous flight constraints may require extensive tuning
[59]Secrecy rateCoastal jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceA secure communication for UAV-relay-assisted maritime MEC +✓ Simultaneously optimize multiple parameters for improved secrecy rate +X Iterative decomposition increases the computational burden +X Assume prior knowledge of Channel State Information (CSI) of devices
+ +1Secure calculation capacity is defined as the average number of secure calculation bits in UAV flying time [54]. +2Secrecy rate is defined as the difference between the achievable rate of legitimate UAV's channel and the rate of eavesdropper channel [136]. + +tion resources, computation resources, and UAV trajectories. To address the non-convexity of the optimization problem, the problem is transformed into tractable forms via auxiliary variables and decomposition. Specifically, for the TDMA scheme, the problem is decoupled into two sub-problems using BCD. The communication and computation resources are optimized via second-order cone (SOC) constraints and SCA, while UAV trajectories are iteratively updated via first-order Taylor approximations to handle non-convex terms. For the NOMA scheme, a penalized BCD (P-BCD) algorithm is proposed to tackle binary constraints. The problem is split into three blocks that are penalty parameter adjustment, resource allocation via SOC and SCA, and trajectory optimization with convex relaxations. The experimental results demonstrate that the proposed algorithms significantly enhance secure computing capacity, with the NOMA scheme achieving up to about 4.3 Mbps and the TDMA scheme reaching about 4.2 Mbps under optimal conditions. Compared to baselines including the straight flight design and no power control, the proposed strategies improve secure computing capacity by about $20\%$ to $30\%$ , particularly in scenarios with lower power budgets (e.g., 0.2 W) and higher required computing bits (e.g., 1 Mbps). The convergence of the algorithms is achieved within 20 iterations, which indicates the efficiency in optimizing UAV trajectories and resource allocation for anti-eavesdropping. + +The study in [53] mainly focuses on a dual-UAV-assisted secure MEC system. In some cases, multi-UAV systems hold great promise for collaboratively executing complex tasks while enhancing the secure communications [49], [54]. 
In the + +work [54], the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems is studied. Firstly, a base station emits jamming signals to protect against fixed-location ground eavesdroppers. Then, it investigated the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems. The problem is decomposed into two sub-problems: (1) resource allocation and trajectory planning, addressed via SCA and BCD algorithms; (2) offloading decisions, solved through Joint Dynamic Programming and Bidding (JDPB) method. For the first sub-problem, non-convex constraints related to transmission power and UAV trajectory are transformed into convex forms using first-order Taylor expansion and relaxation techniques. Specifically, the transmission power optimization sub-problem is approximated via SCA, while the trajectory planning sub-problem is iteratively solved by introducing auxiliary variables and convex approximations. For the second sub-problem, a bidding mechanism is integrated with dynamic programming to reduce computational complexity by grouping dynamic users into sub-regions. The experimental results demonstrate that the proposed JDPB algorithm achieves a sum average secure calculation capacity of 10.1 Mbps in the first time slot. Additionally, under different settings of time slot sizes, transmission power, and flying speed, the sum average secure calculation capacity achieved by JDPB consistently outperforms baseline schemes such as the Greedy Strategy and the Random Strategy. + +Unlike the above studies that deal with ground eavesdrop- + +![](images/b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg) + +![](images/95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg) +Fig. 3. The overall architecture of the anti-eavesdropping strategy. 
Part A illustrates the system model against fixed ground eavesdroppers. In this setup, one UAV operates as a mobile server, while another UAV serves as a jammer to emit jamming signals to disrupt the eavesdroppers' interception capabilities. Part B presents the system model for flying eavesdroppers, where one UAV acts as the server, and another UAV functions as a mobile eavesdropper. To mitigate eavesdropping risks, a ground-based jammer actively emits interference signals to secure communications. + +pers, the work in [55] targets threats from aerial eavesdroppers and explores secure communication in a hybrid Free Space Optical (FSO) and Radio Frequency (RF) system. The UAV acts as both a relay and a jammer, emitting artificial noise (AN) during RF transmission to confuse a fixed-position aerial eavesdropper. The work introduces a novel perspective on protecting space-air-ground networks from eavesdropping by leveraging FSO for its inherent resistance to interception and jointly optimizing trajectory design and power allocation to maximize the secrecy rate with two transmission schemes. The first scheme is the slot-based scheme for delay-sensitive data. The trajectory sub-problem is convexified using first-order Taylor expansion to approximate elevation angle and channel gain constraints, while the power allocation sub-problem is transformed into a convex form by introducing a lower bound on transmit power to ensure convexity. The second scheme is the period-based scheme for delay-insensitive data, in which the relaxed constraints on sum secrecy rates over the entire flight period are adopted. A similar SCA method [54] is applied to convexly approximate the non-convex terms in the constraints. Compared to benchmark schemes without jamming power optimization, both methods achieve approximately 0.4 Mbps higher secrecy rates by integrating AN transmission + +and hybrid FSO/RF links. 
+ 

It is worth noting that most existing studies consider optimizing UAV trajectories on a 2D plane. However, optimizing UAV 3D trajectories may be more practical [139]. The study in [56] considers the UAV's 3D flight trajectory and imperfect knowledge of eavesdroppers' locations, while formulating an optimization approach to maximize the worst-case secrecy rate under various practical constraints, including maximum UAV speed, UAV collision avoidance, UAV positioning error, and UAV energy harvesting. To address the non-convexity of the optimization problem, the original problem is decomposed into multiple sub-problems using BCD and SCA techniques similar to studies in [54] and [55]. By incorporating the additional degree of freedom in the vertical dimension, the proposed approach improves the ability to avoid fixed eavesdropping zones, outperforming 2D trajectory models in maintaining secure communication links under dynamic conditions. Simulation results show that the average secrecy rate of the proposed 3D optimization scheme outperforms that of the fixed-height 2D benchmarks (set at $100\mathrm{m}$ ) by over $20\%$ . 

Unlike the above studies that focus on fixed ground eavesdroppers, mobile eavesdroppers, such as hostile UAVs, introduce more complex threats due to their ability to maneuver, track, and position to intercept communications [22], [57]. For example, the authors in [57] address the challenges caused by a flying eavesdropper that exploits UAV LOS communication. This work focuses on jointly optimizing the UAV's trajectory, transmit power control, and user scheduling to maximize the minimum average secrecy rate, which enables dynamic adjustments to ensure secure communication even against a mobile eavesdropper.
+ +Compared to the anti-eavesdropping strategies in [57] that rely heavily on accurate trajectory optimization and resource allocation, the studies in [58], [59] propose using a jammer to actively emit jamming signals, effectively reducing the interception capability of flying eavesdroppers during the computational offloading process of relay UAVs, as shown in Fig. 3. Meanwhile, with the support of SCA and BCD methods similar to [56], the joint optimization problem of UAV trajectories, resource allocation (including transmit power, time slot allocation, and computation capacity), and jamming strategies can be solved while ensuring practical constraints such as flight speed and anti-collision requirements. Importantly, compared to systems targeting fixed ground eavesdroppers, the works in [58], [59] enhance secure calculation capacity or secrecy rate by modeling the trajectories of both the relay UAV and the mobile eavesdropper as dynamic variables optimized over discrete time slots. Specifically, simulation results in [58] demonstrate that the secure calculation capacity of the proposed scheme converges to approximately 2.78 Mbps within 4 iterations, which is significantly higher than the baseline strategy (where only the location of the relay UAV, transmit power, and jamming power are optimized) by approximately 1.6 Mbps. + +Lesson Learned. Convex optimization has emerged as a fundamental tool for developing anti-eavesdropping strategies in UAV-enabled communication systems, particularly for addressing the inherent non-convexity of joint trajectory and + +![](images/45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg) +Part A. DDQN-based Scheme + +![](images/50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg) +Part B. DDPG-based Scheme + +![](images/b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg) +Part D. 
MAPPO-LSTM-based Scheme + +![](images/ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg) +Part C. MADDPG-based Scheme +Fig. 4. The overall architecture of the RL for anti-eavesdropping. Part A describes the DDQN-based scheme, where the system state is used to generate actions through the DDQN network, followed by action execution and obtaining the next state and reward. An experience replay mechanism is employed to store and randomly sample training data. Part B presents the DDPG-based scheme, where actions are generated through Actor and Critic networks, interacting with the environment to obtain rewards. An experience replay buffer is used to store and sample mini-batches. Part C describes the MADDPG-based scheme, involving multiple UAV agents, each with its own Actor and Critic networks, interacting with the environment and sharing rewards. Part D showcases the MAPPO-LSTM-based scheme, where Actor and Critic networks with LSTM layers process time-series data and train through an experience replay buffer. + +resource allocation problems. For fixed eavesdroppers, simpler optimization models with fewer dynamic variables (e.g., 2D trajectory optimization) can achieve secure communication effectively. However, mobile eavesdroppers require more sophisticated formulations, including 3D trajectory optimization and robust constraints to account for uncertainties in eavesdropper positions. Another important insight is the adaptability of convex optimization when combined with complementary methods like artificial noise jamming and resource allocation strategies. By leveraging convex optimization, systems can balance secrecy performance with energy efficiency, ensuring practical applicability in real-world UAV operations. Techniques such as SCA and BCD have proven highly effective in decoupling complex optimization problems into solvable subproblems, allowing iterative refinement toward locally optimal solutions. 
Overall, convex optimization offers a flexible and mathematically rigorous approach to securing UAV-enabled communication systems for anti-eavesdropping. + +As the number of ground devices increases, along with UAV flight time and the number of optimization variables, the computational complexity of conventional algorithms grows exponentially, leading to infeasibility or suboptimal solutions [140], [141]. Moreover, these methods struggle to adapt to real-time scenarios where UAVs must communicate with mobile users and operate in environments with uncertain or partial information [140], [142]. RL enables UAVs to interact with the environment and autonomously learn optimal policies based on real-time observations [143], as shown in Fig. 4. By leveraging Deep RL (DRL), UAVs can efficiently + +adapt to changing eavesdropping conditions, optimize secure trajectories, and dynamically allocate resources [144], [145]. This learning-driven approach significantly enhances PLS by ensuring adaptive, scalable, and intelligent anti-eavesdropping strategies in UAV communication networks. + +The study in [60] proposes a Deep Q-Network (DQN)-based approach to address the challenge of securing UAV-assisted multi-user wireless communications against passive eavesdropping attacks. The UAV trajectory optimization is formulated as a Markov Decision Process (MDP), where the state space includes the UAV's 3D coordinates and the positions of users. The action space consists of discrete movements in the $x$ , $y$ , and $z$ directions, with each action representing a step change in position. The reward function is designed to maximize the legitimate users' rates, defined as the sum of the channel capacities of users served by the UAV. Unlike many prior works that assume perfect knowledge of eavesdropper CSI [53], [59], this study focuses on optimizing legitimate user rates and using the DQN-based approach without requiring full knowledge of the eavesdropping channels. 
The DQN iteratively optimizes the UAV's trajectory, beamforming matrix, and transmit power allocation, ensuring the UAV dynamically adjusts its position to maximize secrecy capacity. Numerical results show that the secrecy capacity improves with the number of users. The proposed method converges an order of magnitude faster than the Q-learning method and achieves around $35\%$ higher secrecy capacity than Q-learning after 20,000 episodes. + +However, the DQN method may face the issue of Q- + +TABLE V SUMMARY OF RL FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
TechniquesReferenceAlgorithmPros & Cons
Value-based RL[60]DQN○ DQN algorithm for UAV trajectory optimization to maximize the secrecy capacity +✓ Low computational complexity, making it easy to train +× Q-value overestimation, leading to suboptimal action selection
[61]DDQN○ DDQN-based joint trajectory, time allocation, and offloading optimization +✓ Accelerated convergence via action space pruning +✓ Real-time optimization of trajectory and resources +× DDQN is restricted to discrete action spaces +× DDQN is not suitable for continuous action control
Policy Gradient-based RL[62]CAA-MADDPG○ Multi-Agent DRL with attention mechanisms (CAA-MADDPG) for secrecy rate maximization +✓ Handle complex multi-agent interactions with the attention mechanism +× Assume prior knowledge of eavesdropper locations +× Assume ground devices are static
[63]DDPG○ DDPG-based RL for enhancing bi-directional UAV communication security +✓ Support mobile devices and ensure bi-directional security +× Computational overhead increases with device density +× Performance may be sensitive to hyperparameter selection
[64]PPO+DCCN○ Hybrid DCCN and PPO for secrecy rate maximization +✓ The PPO optimizes the UAV trajectory based on the results from DCCN +× The performance is sensitive to the choice of clipping factor in PPO
[65]MAPPO+LSTM○ MAPPO for multi-agent cooperative anti-eavesdropping and LSTM-enhanced sequential learning +✓ The MAPPO+LSTM improves the learning capability of sequential sample data +× Assuming perfect knowledge of CSI may be challenging in real-world scenarios
+ +value overestimation, leading to suboptimal action selection [146]. The authors in [61] propose a double-deep Q-learning (DDQN)-based scheme to jointly optimize the UAV trajectory, time allocation, and offloading decision strategy, aiming to maximize the average secure computing capacity for antieavesdropping in UAV-aided MEC. The system model involves one legitimate UAV server, one illegitimate UAV eavesdropper, one ground jammer, and ground users. The proposed DDQN-based scheme models the optimization problem as an MDP with states, actions, and rewards. The states include the coordinates of the UAVs, while the actions involve offloading decisions, time allocation, and trajectory adjustments. The reward function maximizes secure computing capacity. The DDQN model includes a deep Q-network (QN) and a deep target network (TN) to generate actions and evaluate their values. The parameters of the QN are updated by minimizing the loss function, and the parameters of the TN are periodically updated. The proposed scheme reduces the action space size by deleting illegal actions, such as those that violate time allocation constraints or result in resource waste. Unlike prior works [57], [60] that rely on conventional optimization or DQN with limited consideration of task queues, this approach integrates real-time resource allocation and trajectory optimization while ensuring dynamic constraints. The proposed DDQN scheme converges in 2000 episodes, half the episodes required by DQN (4000 episodes), and achieves a 0.02 Mbits higher average secure computing capacity. + +The value-based RL method (e.g., DQN) mainly focuses on dealing with discrete action spaces that may lead to the loss of optimal solutions [147]. The policy gradient-based RL method (e.g., Deep Deterministic Policy Gradient (DDPG)) can handle continuous action spaces [148], which are more suitable for UAV trajectory and transmit power optimization problems. 
+ +The authors in [62] propose a multi-agent DRL framework to address the challenge of secure UAV communications in + +the presence of eavesdroppers. The system model is similar to Part A of Fig. 3, where the UAV server sends confidential information to ground users, and UAV jammers send AN signals to ground eavesdroppers using 3D beamforming. This study designs the Multi-Agent Deep Deterministic Policy Gradient with a continuous action attention mechanism (CAA-MADDPG) to maximize the system's secrecy rate. The attention mechanism dynamically prioritizes relevant agents' observations (e.g., jammers focusing on eavesdroppers) to reduce the exploration space and accelerate convergence, thereby enhancing the system's ability to counteract eavesdropping attempts. The simulation results show that CAA-MADDPG achieves a secure rate of $4.5\mathrm{bps / Hz}$ and converges in 1000 episodes with three UAV jammers, outperforming MADDPG (around $4\mathrm{bps / Hz}$ and 1400 episodes) and DDPG schemes. + +However, the study in [62] just considers UAV-to-ground communication (U2G) and assumed the ground devices are static. The work in [63] addresses the challenge of securing bi-directional ground-UAV communications in a dynamic environment with mobile ground devices and eavesdroppers. Different from prior works that assume static ground eavesdroppers [54], [56], this study considers mobile ground eavesdroppers for more practical real-world scenarios. The communication in U2G and ground-to-UAV (G2U) cases is modeled, considering factors such as channel gains and distances. The problem of maximizing the worst-case average secrecy rate is formulated as a constrained MDP (CMDP) under the constraints of UAV flight space, flight speed, energy capacity, anti-collision, and peak transmit power. To solve the CMDP, the authors design a DDPG-based RL algorithm. 
The algorithm includes three key components: the primary network (actor and critic networks), the target network, and the replay buffer. They also adopt state normalization and exploration noise to speed up the training convergence of the DDPG. The proposed joint optimization scheme achieves a secrecy rate + 

over $40\%$ higher compared to baselines that optimize only trajectory or only power. In addition, DDPG outperforms DQN by approximately $15\%$ in secrecy rate due to its ability to handle continuous actions. 

The DDPG methods form a fixed mapping from states to actions, which is not suitable for large state spaces that require more exploration and uncertainty [149]. The PPO alleviates this limitation by introducing proximal policy optimization, which allows for more exploration in the large action space. 

The study in [64] proposes a hybrid framework (Double Cascade Correlation Network (DCCN) + PPO) to maximize the secrecy capacity. DCCN bypasses the need for labeled training data by cascading two neural networks to maximize the secrecy channel rate. The PPO dynamically adjusts the UAV's position by using clipped surrogate objectives to stabilize policy updates and advantage estimation to prioritize high-reward actions. Simulation results show that the proposed scheme (DCCN + PPO) achieves an average secrecy rate of 0.73 bps/Hz, outperforming the benchmarks DCCN + DDPG (0.67 bps/Hz) and Random RIS + PPO (0.13 bps/Hz). However, the average secrecy rate continues to decline when the transmit power is higher than 2 W, since the jamming signals transmitted by the secondary source against the eavesdropper will also affect the primary users. 

The study in [64] considered only the use of one UAV to assist in secure communication. However, in low-altitude economic networks, it may be more important for multiple UAVs to collaborate to implement anti-eavesdropping strategies.
The study in [65] considers a system model that treats all secondary transmitters and multiple UAV jammers as multi-agents. A Multi-Agent PPO algorithm combined with Long Short-Term Memory (LSTM) networks, named MAPPO-LSTM, is proposed to maximize the secure communication rate by jointly optimizing the UAV trajectory, transmission power, and energy harvesting coefficients. The problem is formulated as a nonconvex MDP consisting of an action space, state space, observation, and reward (which consists of the sum of the secure communication rate, SINR information, and battery capacity). The MAPPO algorithm introduces counterfactual baselines to address the credit assignment problem in centralized learning and combines with the LSTM network to enhance the learning capability of sequential sample data. Compared to benchmark schemes MAPPO and MADDPG, the proposed MAPPO-LSTM method achieved around $17\% - 20\%$ higher average secrecy rate in large-scale scenarios, with convergence speeds 1.37 times and 1.93 times faster, respectively. In addition, the reward is sensitive to the discount factor, where setting the factor to 0.99 enables faster and more stable convergence. Deviations from this value result in more pronounced fluctuations in the reward and secrecy rate. 

Lesson Learned. The RL has emerged as a powerful yet challenging tool for anti-eavesdropping in UAV-assisted secure communications. A key lesson is that multi-agent cooperation significantly enhances security compared to single-agent approaches, enabling adaptive trajectory control, power allocation, and jamming coordination in dynamic environments. However, scalability and convergence efficiency remain critical bottlenecks, especially in high-dimensional, time- 

varying settings, as many studies unrealistically assume perfect channel information, and deep RL's convergence time leaves the system vulnerable before optimization completes.
Furthermore, key limitations demand further attention, such as RL's computational complexity, which restricts its use in resource-limited settings requiring real-time security, and its sensitivity to hyperparameter tuning that requires meticulous configuration to ensure optimal performance. Future advancements should focus on developing generalizable and robust learning architectures that can dynamically adapt to evolving threats while maintaining computational feasibility, addressing practical deployment challenges, exploring hybrid approaches, prioritizing security in system design, and balancing security performance with energy consumption. + +Deep learning, with its strong learning capabilities, parallel processing, and comprehensive reasoning [150]–[152], has huge potential to enhance anti-eavesdropping strategies in UAV communications, especially in environments with rapidly changing conditions and complex interactions [153]. Given the intricate problem of UAV trajectory variation and its nonlinear characteristics in time and space [154], [155], deep learning networks, such as neural networks and generative models, are emerging as potential solutions to improve the security and performance of UAV communication systems. + +The study in [66] explores the use of deep learning to optimize UAV deployment and jamming strategies against eavesdroppers to maximize the secrecy rate in the complete CSI scenario. The optimization problem is decomposed into two layers: the inner layer optimizes jamming power for a fixed UAV location, and the outer layer optimizes UAV deployment. The inner problem is solved using a bi-section search algorithm, while the outer problem is addressed using a deep neural network (DNN) to approximate the optimal UAV deployment. The DNN is designed as a fully connected structure, which includes an input layer, two hidden layers, and an output layer, as shown in part A of Fig. 5. 
The DNN is trained using a dataset generated by simulating different UAV deployments and corresponding secrecy rates. The final optimal deployment of the UAV can be approximated when the mean square error of weights between neurons is minimized. The DNN model achieves an average distance error of 2.2 meters compared to the optimal deployment found by the exhaustive search baseline. + +The fully connected neural network used in [66] is suited for problems where inputs and outputs are fixed-dimensional vectors without inherent spatial or sequential relationships [150]. Moreover, convolutional neural networks (CNNs) and recurrent neural networks (RNNs) can also contribute to antieavesdropping. In contrast to fully connected networks, CNNs are particularly effective for exploring spatial features from images or spatial maps [156]. RNNs, on the other hand, focus on handling sequential data by maintaining a memory of previous inputs through recurrent connections [150]. The authors in [67] propose a CNN-LSTM-based secure efficiency map (SEM) framework, which is constructed by calculating each subarea's security-efficiency index using a weighted exponential coefficient to combine normalized secure spectrum efficiency (secrecy rate per unit bandwidth) and secure energy + +TABLE VI SUMMARY OF DEEP LEARNING FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
TechniquesReferenceAlgorithmPros & Cons
Neural network model[66]DNN● Use DNN to optimize UAV deployment and jamming strategies for secure communication +● The DNN model reduces the complexity of exhaustive searches +● Rely on complete statistical channel knowledge +● Require intensive resources to generate a large amount of training data
[67]CNN-LSTM● CNN-LSTM-based SEM prediction for dynamic secure UAV trajectory optimization +● Efficient spatiotemporal feature extraction via CNN-LSTM +● Assume fixed UAV height and passive eavesdropper +● Training CNN-LSTM network requires a substantial amount of data
[68]FL-DNN● FL-DNN-RL integration (FairLearn) for maximizing fairness in secrecy rates +● Collaborative learning via FL improves generalization in anti-eavesdropping strategies +● Involving multiple learning mechanisms requires significant computational resources +● Assuming perfect CSI and eavesdropper localization may be impractical
Generative AI model[69]MD-GAN● MD-GAN with unknown CSI as model weights +● Adapt to dynamic environments via gradient-based training +● Do not require knowledge of the eavesdropper's detection threshold +● Training a GAN can be computationally intensive
[70]DD-GAN● DD-GAN uses genetic algorithm-generated datasets for GAN training +● Achieve an effective trade-off between covert rate and detection probability +● Training relies on the quality and quantity of the genetic algorithm-generated data
[71]GDMTD3● GDMTD3 integrates generative diffusion models into TD3 +● Handle high-dimensional action spaces to adapt mobile eavesdroppers +● Computational complexity from diffusion denoising steps
+ +efficiency (secrecy rate per unit power). Historical SEMs are fed into a CNN-LSTM network to predict future SEMs by leveraging spatial-temporal feature extraction and time-series correlation. Based on predicted SEMs, a trajectory planning algorithm dynamically guides the UAV to subareas with the highest security-efficiency indices. The proposed SEM-enabled trajectory planning achieves an average security-efficiency index of 0.81, outperforming baseline schemes (e.g., static trajectory [142] or non-predictive methods [62], [157]) by over $30\%$ . + +Previous deep learning-based architectures [66], [67] are centralized, lacking collaboration and knowledge sharing among UAVs, while also facing challenges in privacy preservation and scalability. To address these limitations and optimize secrecy rate maximization under constraints such as UAV mobility, power budgets, and scheduling fairness, the authors in [68] propose a federated learning (FL)-based framework (FairLearn). As shown in part B of Fig. 5, the FairLearn employs three learning modules: (1) Module-D uses RL to dynamically generate training datasets by exploring UAV trajectories, power allocation, and scheduling policies; (2) Module-P employs a DNN trained on these datasets to predict optimal 3D trajectory, transmit power, and user scheduling, maximizing proportional fairness in secrecy rates (defined as the difference between legitimate UAV-user rates and eavesdropper rates); (3) Module-C applies FL to aggregate DNN models across UAVs, enabling collaborative learning while preserving data privacy. Simulation results show that FairLearn's secrecy rate is $26.6\%$ higher than BCD at 1.4W transmit power. After 100s of execution, FairLearn achieves $14.34\%$ , $24.56\%$ , and $108\%$ higher secrecy rates than BCD, MAQ, and QCQP baselines, respectively. 
+ +It is worth noting that UAVs can only obtain limited prior environmental information without knowing perfect channel information and the eavesdropper's detection threshold or ex + +act location. Some previous methods [59], [65], [68] may find it difficult to solve the optimization problem in such scenarios. In contrast, the generative adversarial network (GAN) has emerged as a new model for solving optimization problems with limited prior information [158], [159]. GAN can effectively model and approximate unknown distributions (such as channel coefficients, detection thresholds, and environmental parameters) through adversarial learning, where the generator continuously improves its strategy by learning from the feedback from the discriminator [158]. + +The work in [69] addresses challenges related to partial channel distribution information and unknown eavesdropper detection thresholds by proposing a model-driven GAN (MDGAN) framework. The unknown channel coefficients and detection thresholds are treated as trainable weights in the MD-GAN. The MD-GAN transforms the joint trajectory and power optimization problem into a dynamic game between a generator (UAV) and a discriminator (eavesdropper), where the UAV acts as a jammer to protect secondary users from eavesdroppers. The generator optimizes the UAV's 3D trajectory and jamming power, while the discriminator evaluates detection errors. Then, a GAN-based joint trajectory and power optimization (GAN-JTP) algorithm is developed to achieve Nash equilibrium (i.e., maximizing the covert rate and the probability of detection errors). As shown in part C of Fig. 5, the GAN-JTP algorithm consists of two stages: network learning and network training. In the network learning stage, the generator optimizes the UAV's trajectory and transmit power based on the current state and environment. 
In the network training stage, the generator and discriminator are alternately trained using gradient backpropagation to update their weights. Simulation results show that increasing the training of the discriminator accelerates the convergence of the generator (e.g., when the training step is 10, convergence is achieved within 30 iterations, compared to 89 iterations when + +![](images/44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg) +Part A. DNN-based Architecture + +![](images/299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg) +Part B. FL-DNN-based Architecture + +![](images/7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg) +Part C. MD-GAN-based Architecture + +![](images/12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg) +Part D. DD-GAN-based Architecture + +![](images/455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg) +Part E. GDMTD3-based Architecture +Fig. 5. The overall architecture illustrates various deep learning-based architectures designed to enhance anti-eavesdropping capabilities in UAV deployment scenarios. Part A presents a DNN-based architecture that processes air-ground and ground-ground channel states to determine UAV deployment. Part B shows an FL-DNN-based architecture, incorporating modules for reinforcement learning, DNN-based feature mapping, and FL. Part C depicts an MD-GAN-based architecture, where a generator produces trajectories and power outputs based on location and environment status, while a discriminator evaluates the decisions. Part D introduces a DD-GAN-based architecture, focusing on generating jamming solutions to maximize covert rates, with a discriminator distinguishing between jamming and non-jamming solutions. Part E illustrates a GDMTD3-based architecture, utilizing an experience replay buffer and diffusion reverse process to optimize UAV deployment strategies. + +the training step is 1). 
For a flight period of 100 seconds, the GAN-JTP algorithm achieves a $0.47\%$ increase in covert rate with a $0.15\%$ reduction in detection error probability compared to the BCD-based scheme [160]. + +The MD-GAN [69] relies on model-driven methods where the unknown channel information and detection threshold are treated as trained weights. Differently, the authors in [70] propose a data-driven GAN (DD-GAN) framework that focuses on generating data consisting of environmental parameters and optimal solutions to train the GAN. Specifically, the DD-GAN transforms the optimization process into an interactive game between the UAV and eavesdropper, where the UAV aims to maximize the covert rate, and the eavesdropper aims to detect the presence of covert communication. To address the non-convexity and lack of eavesdropper detection threshold information in the optimization process, the DD-GAN trains a generator (UAV) and discriminator (eavesdropper) adversarially, using genetic algorithm-generated samples as prior data, as shown in part D of Fig. 5. The generator produces power and trajectory solutions, while the discriminator evaluates the detectability. The loss function of the discriminator is designed to maximize the probability of correctly identifying real data and minimize the probability of being fooled by generated data. The generator's loss function aims to maximize the probability that the generated data is mistaken for real data by the discriminator. + +Besides GANs [69], [70], another generative model, the diffusion model, has advanced the effective representation + +of multi-dimensional data distributions [161]. The diffusion model can better capture the complex dynamics and the tradeoff in the multi-objective optimization problem concerning secure communication [112]. For example, The diffusion model captures complex state-action distributions, enabling adaptive beamforming and UAV repositioning under eavesdropper mobility. 
To tackle dynamic environments and high-dimensional action spaces in secure communication and energy efficiency multi-objective optimization problem, the authors in [71] propose GDMTD3, a Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm enhanced with generative diffusion models. Key innovations include integrating diffusion-based reverse processes into the actor network for robust policy generation and addressing continuous action spaces, as shown in part E in Fig. 5. The training process of GDMTD3 involves initializing the online critic and actor networks, interacting with the environment, and updating the network parameters based on the collected experiences. The actor network uses a generative diffusion model to sample actions, while the critic networks evaluate the actions using twin critic networks to reduce overestimation bias. Simulation results show that GDMTD3 outperforms DRL-based benchmarks (including PPO, TD3, and DDPG), achieving about $50\%$ higher cumulative rewards and around $21\%$ higher average secrecy rate than TD3. In addition, when the number of UAVs increases from 4 to 8, the average secrecy rate increases accordingly. However, increasing the number of UAVs from 8 to 16 raises + +TABLE VII SUMMARY OF AUTHENTICATION FOR COMMUNICATION CONFIDENTIALITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
TechniquesReferenceAlgorithmPros & Cons
PUFs-based authentication[72]PUFs\(^1\)PUF-based dynamic session key generation and mutual authentication protocol +✓ Lightweight design with no stored secrets +✗ Potential overhead during temporary identity updates
[73]PUF-fuzzy extractorPUF-fuzzy extractor mutual authentication with TS-based dynamic session adaptation +✓ Dynamic session time adaptation minimizes idle periods and enhances security +✗ Higher computational cost due to fuzzy extractors
[74]PUFs-fuzzy extractor-AEGISIntegration of PUFs, fuzzy extractors, and AEGIS for mutual authentication +✓ The proposed password/biometric update mechanism reduces server dependency +✗ Multiple cryptographic operations and protocols may cause delays in the implementation
Channel based authentication[75]Rician channelPhysical-layer fingerprinting authentication based on Rician channel characteristics +✓ Optimal power allocation balances data, AN, and authentication tag transmission +✗ Reliance on encrypted tags requires high demand on UAV processing capabilities
[76]Rayleigh channelSNR difference-based PLA scheme +✓ The SNR-based design can be implemented without additional hardware infrastructure +✗ The simplified Rayleigh channel model may limit applicability to real-world propagation environments
[77]Rayleigh/Rician channelsAD metric\(^2\) for authentication under Rayleigh/Rician channels +✓ AD metric-based method improves the detection accuracy of authentication +✓ Detailed analysis of authentication performance under different propagation conditions +✗ Computational complexity in Rician channels due to hypergeometric functions
+ +1Physical Unclonable Functions (PUFs) are hardware-based security primitives that exploit inherent and unique physical variations in devices to generate unclonable and unpredictable responses for communication authentication. +2Authentication Distance (AD) is a metric proposed in [77] to distinguish legitimate and illegitimate signals for communication authentication. + +energy consumption but only marginally improves secrecy rates, highlighting a performance-energy trade-off. + +Lesson Learned. A key lesson learned is that deep learning, particularly through advanced architectures such as GANs [69], [70] and diffusion models [71], can address complex, dynamic environments with partial channel state information and unknown eavesdropper locations, while demonstrating superior performance over traditional methods [59], [65], [66]. These approaches demonstrate that deep learning not only strengthens the resilience of secure communications but also enables autonomous, real-time decision-making to counteract evolving eavesdropping threats in UAV networks. + +# B. Communication Authentication + +In the LAENet, as UAVs operate in open environments and rely on wireless communication, they are highly vulnerable to security threats such as node capture and man-in-the-middle attacks [46]. Ensuring secure and reliable authentication between UAVs and ground stations/users or among UAVs is critical to preventing unauthorized access [52], [162]. Traditional cryptographic authentication schemes often impose significant computational and memory overheads and incur considerable latency, making them unsuitable for resource-constrained UAVs [163]. Recently, advancements such as PUFs and Physical-layer Authentication (PLA) mechanisms have opened new possibilities for lightweight and effective authentication in the LAENet. 
+ +PUFs are a class of hardware security primitives that leverage the inherent manufacturing variations (such as variations in circuit delay or RF properties) in semiconductor devices to generate unique and unpredictable responses [164]. When a specific input is applied to a PUF, the device generates a corresponding response, forming a challenge-response pair that is unique to this device [164]. Such uniqueness and unpredictability make PUFs highly resistant to cloning and + +tampering, making them as a secure means for device authentication and key generation [165]. In addition, employing a PUF in a UAV allows for secure authentication without the need for complex cryptographic operations, making it an efficient solution for resource-constrained scenarios [166]. + +The work in [72] proposes a lightweight mutual authentication protocol, named SecAuthUAV, for securing UAV-ground station and UAV-UAV communications. SecAuthUAV employs PUFs in each UAV to generate a unique, unclonable session key that functions as a non-reproducible fingerprint. The protocol consists of three phases, as shown in Fig. 6. During UAV registration, a challenge-response pair from the UAV's PUF is stored, and a temporary identity is generated. In the UAV-ground station authentication phase, the UAV and ground station authenticate each other using challenge-response pairs and nonces, establish a session key, and update their identities. Lastly, in the UAV-UAV authentication phase, the GS facilitates secure communication by authenticating a second UAV and generates a session key for both UAVs. + +However, the work in [72] ignores the fact that the noise in PUFs can result in significant deviation in the output for the same input at different time points. 
In addition, [72] does not adjust the session time after establishing an authenticated session between two parties, which may lead to the active session remaining idle for a long time and thus give an opportunity for an adversary to interfere with the communication link. In light of this, the authors in [73] propose an UAV Authentication with Adaptive Session (UAAS) framework to address these challenges. Firstly, they combine PUFs and fuzzy extractors to address PUF noise. The fuzzy extractors consist of two phases: the $Gen(.)$ phase creates a key and non-sensitive helper data, and the $Rep(.)$ phase reconstructs the key from a noisy PUF response using the helper data while tolerating minor deviations. Then, the Thompson Sampling (TS)-based scheme is proposed to dynamically adapt the session time. + +![](images/1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg) +Fig. 6. The overall architecture of the PUF-based authentication schemes for UAV-GS and UAV-UAV communication in [72]. Part A illustrates the PUF-based authentication process between a UAV and a ground station (GS). The UAV sends its ID and a nonce to the GS, which responds with a hash value based on the PUF, UAV ID, and nonce. The UAV then sends a value derived from the PUF and another nonce, and the GS verifies the authentication by comparing hash values. Part B shows the PUF-based authentication between two UAVs (U1 and U2) through the GS. After establishing a session key with the GS, U1 requests a connection to U2. The GS facilitates the authentication by generating a new session key, which is securely shared between U1 and U2. + +TS is a probabilistic approach that balances exploration and exploitation, determining the session time based on the fraction of busy time to minimize idle periods and reduce the risk of adversarial interference. 
Although the security analysis demonstrates that UAAS improves the security level in the mutual authentication mechanism, its throughput is $20.38\%$ lower and computational cost is 126 ms higher than the baseline [72] due to security overhead. + +In the LAENet, while establishing mutual authentication between UAVs and ground stations is critical, it is also important to incorporate role-specific access controls for users to ensure communication confidentiality and prevent unauthorized access [167]. The work in [74] proposes a PUF-enabled authentication framework for Internet of Drones (PAF-IoD) to establish mutual authentication among users, UAVs, and ground stations. Users need to authenticate with stations to access the stored data or communicate directly with UAVs, where the users' authentication mechanism includes three factors (identity, password, and biometric data). Similar to [73], PAF-IoD uses PUFs and a fuzzy extractor in the authentication process to generate a unique and tamper-proof session key while tolerating the noise in PUFs. Furthermore, the designed authenticated encryption with associated data (AEAD)-based encryption algorithm is utilized for encrypting and decrypting messages exchanged between the user, ground station server, and UAVs. + +In addition to leveraging intrinsic physical properties of hardware for authentication design through PUFs [72]–[74], the characteristics of communication channels can be used for authentication. The PLA mechanism authenticates devices by exploiting the unique physical characteristics of wireless communication channels, such as CSI, received signal strength (RSS), and signal-to-noise ratio (SNR) [168]. The main reason is that the wireless channel between two communicating entities exhibits location-specific and time-varying properties + +due to multipath propagation, fading, and environmental factors [169]. 
These diverse physical channel conditions, which provide a robust set of features for authentication, have been investigated in terrestrial communication networks [168]–[170]. Furthermore, the source of received signals can be accurately and promptly detected [170], making PLA particularly advantageous in the dynamic and complex communication environments of the LAENet. + +The authors in [75] leverage the unique properties of the physical layer channel, Rician channel, to develop a PLA for UAV-ground station communication. Given that UAVs receive signals subject to the Rician fading model, the ground station integrates authentication directly into the transmission process. It employs a one-way collision-resistant function (e.g. cryptographic hash function) that combines data symbols with a shared secret key to generate a low-power authentication tag for UAV and seamlessly embeds it into the transmitted signal. The authentication tag is validated by the correlation shaped by the Rician statistical characteristics of the fading channel, i.e., the correlation between the estimated tag (derived from the received signal) and the expected tag (generated using the secret key and decoded data symbols). + +However, the work in [75] still partially relies on cryptographic tag generation for authentication, which may not be suitable for UAVs with limited processing capabilities. The study in [76] leverages channel characteristics and geographical locations for PLA design, where the SNR differences between consecutive transmissions are utilized as the authentication metric. Specifically, a legitimate transmitter and a jammer have distinct channel variations due to differences in their geographical locations. The UAV authenticates the legitimate transmitter or jammer by formulating a binary hypothesis test based on the SNR difference between two successive transmissions. 
If the difference falls within a predefined threshold, the transmission is authenticated as from the legitimate + +![](images/f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg) +Part A. Channel-based authentication in urban + +![](images/967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg) +Part B. Channel-based authentication in suburban +Fig. 7. The overall architecture of the channel-based authentication in urban and suburban environments in [77]. Part A depicts the authentication process in an urban environment under Rayleigh channel conditions. The UAV receiver calculates the SNR, computes the AD, and compares it with a detection threshold to distinguish between legitimate and malicious sensors. Part B illustrates the authentication process in a suburban environment, where the UAV receiver performs similar steps to authenticate legitimate sensors and detect malicious ones under Rayleigh channel conditions. + +transmitter; otherwise, it is classified as a jammer. The closed-form expressions for the probability density function of SNR differences, false alarm rate (FAR), and miss detection rate (MDR) are derived under Rayleigh fading channels in single-UAV and dual-UAV scenarios. The non-convex optimization problem of minimizing MDR under FAR constraints is solved using an SCA algorithm, which outperforms the RSS-based baseline [90] by about $40\%$ . + +It is worth noting that study [75] may lack a comprehensive analysis of the UAV-PLA performance under different propagation conditions. Additionally, the detection performance may be further improved with other indicators. As shown in Fig. 7, the work in [77] proposes a novel PLA framework under different propagation conditions, including dense urban and suburban environments modeled by Rayleigh and Rician channels, respectively. A new metric, Authentication Distance + +(AD), is proposed as the normalized difference in received SNR between adjacent transmissions. 
For Rayleigh channels, closed-form expressions for FAR and MDR are derived using convolution and integration-by-parts, while Rician channels employ doubly non-central $F$ distributions to model AD statistics. Similar to study [76], this authentication framework minimizes MDR under FAR constraints. In dense urban settings, MDR depends on path loss and transmitter-UAV geometry. For suburban environments, it incorporates elevation angle-dependent Rician factors and path loss exponents to improve discriminability between legitimate and illegitimate signals. The proposed AD-based method outperforms the SNR-difference baseline [171], achieving 40–60% lower MDR. + +Lesson Learned. Leveraging physical-layer characteristics, such as PUFs and channel properties, in conjunction with communication models and optimization algorithms, has proven effective in enhancing authentication accuracy and reducing detection errors. However, some methods also reveal limitations. For instance, the assumptions of ideal channel conditions and the neglect of practical implementation constraints may limit the applicability of the proposed solutions [76], [77]. Future research should focus on addressing these limitations by exploring more practical channel models and considering the trade-offs between security and system complexity. + +# IV. COMMUNICATION AVAILABILITY FOR LAENET + +# A. Anti-Jamming Strategy + +Jamming attacks pose significant challenges to communication availability in the LAENet by disrupting legitimate communication links and degrading the performance of aircraft communication networks [79], [172]. As shown in Fig. 10, these attacks can exploit the openness and broadcasting nature of UAV networks, making them particularly vulnerable to interference [79]. Malicious jammers can transmit strong signals that weaken signal strength, degrade signal quality, and increase communication delays, leading to unreliable coverage and potential paralysis of the entire network [172], [173]. 
This vulnerability underscores the urgent need for effective anti-jamming technologies to ensure reliable communication in the LAENet. + +Various anti-jamming strategies have been explored to safeguard the LAENet against malicious jamming, mainly focusing on trajectory adjustment, as well as channel and power control. Overall, by adjusting the trajectory in the spatial domain, an UAV can evade jamming signals while maintaining reliable communication with legitimate devices [80], [173]. Besides the spatial-domain anti-jamming strategy, the UAV can implement a frequency-domain-based anti-jamming scheme. The UAV can select legitimate channels while avoiding jamming signals and control transmit power to minimize energy consumption and latency under jamming attacks [83], [84]. + +Convex optimization methods can be used to adjust the UAV's trajectory to achieve anti-jamming by strategically guiding its movement to reduce interference and enhance communication reliability [80], [173]. It provides a systematic and efficient approach to handle the complex, non-convex + +TABLE VIII SUMMARY OF ANTI-JAMMING STRATEGY FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
TechniquesReferenceAlgorithmPros & Cons
Convex optimization[80]BCD, SCABCD and SCA for UAV 3D trajectory optimization for anti-jamming +✓ Probabilistic LoS performs well in real-world scenarios such as urban environments +X High computational complexity may be challenging in resource-constrained environments
[81]SCA, DinkelbachSCA and Dinkelbach algorithm for energy-efficient trajectory optimization under malicious jammers +✓ Balance between throughput and energy consumption in anti-jamming +X Assume static and LoS-dominated channels
[82]BCD, SCABCD and SCA for joint UAV trajectory and transmit power optimization under jamming +✓ Improve throughput by considering transmit power optimization against jammers +X Assume a fixed UAV altitude and a static channel environment
Multi-agent RL[87]MALQLCollaborative MALQL algorithm for anti-jamming with channel and power allocation +✓ Accelerate convergence compared to single-agent Q-learning +X Assuming predefined UAV trajectories limits adaptability
[88]MARLMARL with adversarial pre-training for dynamic and generalized jamming +✓ Generalize to various jamming patterns via adversarial populations for pre-training +X Pre-training for generalized jamming may require significant offline resources
[89]MATD3MATD3 algorithm with PER for dynamic resource management under jamming attacks +✓ Handle high-dimensional continuous action spaces +X The integration of PER and spectrum sensing may increase the computational complexity
+ +problems that arise when optimizing UAV trajectories and various constraints under malicious jamming conditions [131]. The work in [80] investigates anti-jamming 3D trajectory design for UAV-enabled wireless sensor networks under a probabilistic LoS channel model. The probabilistic LoS model accounts for elevation angle-dependent shadowing effects in urban environments compared with simplified LoS models. The BCD and SCA algorithms are employed to optimize the UAV's horizontal and vertical trajectory, allowing the UAV to move closer to the ground station for improved transmission rates while dynamically adjusting its elevation angle relative to the jammer to mitigate interference. + +However, the anti-jamming trajectory optimization in [80] under the probabilistic LoS model does not consider the energy consumption issue. The study in [81] utilizes SCA and Dinkelbach's algorithm to adjust the UAV's trajectory to avoid areas with jammers while maximizing energy efficiency, which is defined as the ratio of total throughput to propulsion energy consumption during flight. Compared to hovering-centric benchmarks, the optimized trajectory reduced energy consumption by $82\%$ while maintaining $73.16\%$ of the sum throughput. It is worth noting that the transmit power of the UAV and station is fixed in [81], whereas power optimization is also an important factor for energy efficiency. The authors in [82] use the SCA and BCD algorithms to maximize throughput by iteratively optimizing power allocation (via convex reformulation of throughput bounds) and UAV trajectory (via slack variables for distance constraints and jamming mitigation) to avoid jamming signals. The proposed scheme achieves $40\%$ higher throughput compared to the "Line trajectory with fixed power" baseline. 
+ +While convex optimization methods [80]–[82] work well for fixed jamming patterns, they may struggle to handle dynamic, intelligent jamming [174] in real-time due to their reliance on global information and the challenges inherent in solving nonconvex problems with increased optimized variables [142]. In contrast, RL and DRL offer significant advantages by enabling autonomous, adaptive decision-making [143], [147]. These + +approaches can continuously adjust to environmental changes, learn from past interactions, and optimize performance in real-time [144], [175]. The RL-based anti-jamming methods have emerged as a promising solution due to their ability to operate without excessive prior information (such as unknown environment, CSI, and jamming mode) [147]. Single-agent RL algorithms have been used in previous works to develop anti-jamming strategies in communication networks by regarding jammers and other legitimate users as part of the environment, including independent anti-jamming channel selection methods [83]–[86]. However, these single-agent approaches may fail to converge when dealing with a large number of agents or a high-dimensional action-state space [87], making them impractical for complex, multi-agent scenarios in the LAENet. To address these limitations, multi-agent RL (MARL) methods have been proposed to allow each agent to make decisions based on local information and exchange data with others (such as observations or model parameters). + +The study in [87] proposes a collaborative multi-agent layered Q-learning (MALQL) algorithm for anti-jamming communication in UAV networks by jointly optimizing channel and power allocation to maximize system Quality of Experience (QoE). The problem is modeled as a local interaction Markov game based on the constructed interference graph. The MALQL divides the problem into two subgames of channel selection (Layer 1) and power allocation (Layer 2), as shown in part B of Fig. 8. 
The channel layer uses a graph-based interference model to capture mutual interference among UAVs. Each UAV is represented as a node, and edges are formed between UAVs that are within a predefined interference distance. This model allows UAVs to identify and avoid channels that are being used by neighboring UAVs or jammed by external attackers, thereby reducing the jamming likelihood. The power layer optimizes transmit power to meet rate thresholds. Theoretical analysis confirms that MALQL can converge to a pure strategy Nash equilibrium. + +Nevertheless, there are still some issues with the anti-jamming mechanism in [87]. Considering that the rapid mo + +![](images/139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg) +Part A. Overall system model under jamming + +![](images/4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg) +Part B. MALQL-based anti-jamming scheme + +![](images/fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg) +Part C. Population update of pre-training for generalized anti-jamming scheme + +![](images/bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg) +Part D. PER-MATD3-based anti-jamming scheme + +![](images/7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg) +Actor-Critic network for agent k +Fig. 8. The overall architecture illustrates various reinforcement learning-based anti-jamming schemes designed to enhance communication reliability in UAV-assisted MEC systems under jamming attacks. Part A presents the overall system model, depicting UAVs and jammers interacting within a dynamic environment. Part B shows the MALQL-based anti-jamming scheme, where agents use layered Q-learning to determine actions based on local observations and rewards. Part C depicts the population update mechanism for pre-training a generalized anti-jamming scheme, involving a jammer population, trajectory encoder, and decoder network to optimize jamming policies. 
Part D introduces the PER-MATD3-based anti-jamming scheme, incorporating a priority experience replay buffer and actor-critic networks to dynamically allocate resources and optimize UAV deployment strategies. + +bility of UAVs may expose them to various and unknown jamming patterns due to frequent transitions to new scenarios, the anti-jamming methods need to be generalized [176], especially in the LAENet. The work [87] randomly initializes strategies and learns from scratch for a particular deployment environment with no pretraining, which may lead to a reduction in the generalization ability of the anti-jamming strategy. In light of this, the authors in [88] introduce an adversarial pre-training stage in the proposed two-stage MARL with a decentralized partially observable Markov decision process. Specifically, the adversarial pre-training stage uses a quality-diverse jammer population (e.g., fixed, random, sweeping, statistic, and RL-based jamming) to bootstrap generalized anti-jamming strategies instead of directly initializing the agents with random anti-jamming policies, as shown in part C of Fig. 8. This pre-training ensures that UAVs are not overfitted to specific jamming patterns and can generalize to new jamming attacks in real-world deployments. The pre-trained policies are deployed in the fine-tuning stage, where a graph convolutional-based MARL algorithm is proposed to jointly optimize channel selection and power allocation for anti-jamming similar to [87]. Simulation results show that the proposed solution achieves $20 - 30\%$ higher cumulative rewards than collaborative multi-agent Q-learning [177] and independent Q-learning [83] under fixed and sweeping jamming. + +Note that previous RL-based anti-jamming strategies [87], [88] mainly rely on the Q-learning method, which is suitable for discrete action spaces but may be limited in dealing with high-dimensional continuous spaces [147], [148]. 
The authors in [89] propose a PER-MATD3 algorithm against jamming + +by integrating spectrum-aware channel selection and prioritized experience replay (PER) into an MADRL framework, as shown in part D of Fig. 10. The proposed spectrum-aware intelligent channel selection uses energy detection-based spectrum sensing, enabling UAVs to identify and avoid jammed channels. The TD3 is specifically designed to handle continuous-valued states and actions, where two critic networks, target policy smoothing, and delayed policy updates are used to further stabilize DRL training. By leveraging PER, the agents can learn from high-error experiences, thereby accelerating adaptation to time-varying CSI, imperfect jamming detection, and co-channel interference. By jointly optimizing CPU frequency, bandwidth allocation, and channel selection to minimize the impact of jamming, PER-MATD3 reduces system cost (a linear combination of latency and energy consumption) by approximately $16.7\%$ , $9.1\%$ , and $1.2\%$ compared to the baselines of Q-learning, MATD3-JSC (without PER), and PER-MATD3 (without channel selection), respectively. + +Lesson Learned. Recent advancements in anti-jamming strategies show that intelligent decision-making for trajectory control, channel selection, and power control is essential for effective jamming mitigation. A key takeaway is the successful integration of MARL to develop dynamic and adaptive anti-jamming solutions [75]. By employing intelligent algorithms such as adversarial pre-training and decentralized decision-making, UAV networks can generalize anti-jamming strategies across diverse environments [76], [77]. However, challenges persist in the generalization of these strategies across various jamming types and environmental conditions, as well as balancing the trade-offs between energy consumption, + +![](images/c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg) +Part A. 
ML-based spoofing detection + +![](images/2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg) +Part B. Rule and ML-based spoofing detection +Fig. 9. The overall framework of ML and rule-based spoofing detection for GPS spoofing detection in the LAENet. Part A depicts an ML-based spoofing detection mechanism in [93], where multiple CNN classifiers are trained with updated sample weights to form an integrated classification model. Each CNN transfers its optimized parameters to subsequent classifiers, enhancing the model's robustness. Part B presents a hybrid rule and ML-based approach in [94], where statistical analysis of path losses between UAVs and multiple base stations (BSs) is performed by edge servers. The analyzed data is processed through MLPs to generate individual predictions, which are aggregated to produce a final spoofing detection decision. + +latency, and throughput. Future research could delve into the integration of more adaptive learning frameworks (such as deep learning) into the LAENet for anti-jamming, enabling it to better manage partial or imperfect environmental observations for low-latency, real-time decision-making in multi-UAV systems. + +# B. Spoofing Defense + +In the LAENet, the openness of A2G transmission channels and the dynamic nature of low-altitude aircraft networks make them particularly susceptible to identity-based spoofing attacks [50]. In such attacks, a malicious entity impersonates a legitimate transmitter using falsified identity information, such as a spoofed media access control address, to gain unauthorized access to the network [52]. Once authenticated, the spoofer can disrupt communications among aircraft by launching more severe attacks, such as rogue access point infiltration and denial-of-service attacks, ultimately leading to network congestion and service outages [75]. 
Given the limitations of conventional authentication methods that rely on complex cryptographic protocols [52], PLA offers a promising alternative by leveraging the inherent and unique physical characteristics of wireless transmissions for the LAENet, which is introduced in Section III-B. Overall, this type of PLA can defend against spoofing + 

attacks in the LAENet by exploiting the unique characteristics of the wireless channel (such as RSS, Rayleigh channel, and Rician channel) to identify and distinguish between legitimate devices and spoofers. 

The work in [90] proposes a PLA framework to detect spoofing attacks by exploiting spatial correlations of RSS in A2G channels. The key idea is that the RSS from a legitimate transmitter will remain relatively consistent due to its fixed location, while the RSS from a spoofer will vary significantly because of its different position and channel conditions. Thus, the UAV receiver can perform a hypothesis test to authenticate incoming signals. If the RSS distance between the current signal and a previously authenticated signal is below a predefined threshold, the signal is accepted as legitimate. Otherwise, it is flagged as a spoofing attempt. However, the work [90] is considered under an ideal transmission scenario, where the propagation environment is perfectly exempted from external interference. To address this limitation, the authors in [91] develop a PLA framework that accounts for channel randomness and interference uncertainty. First, they model the G2A link as a Rayleigh fading channel. Then, they introduce jamming signals as external interference. By modeling the jamming power statistically and incorporating it into the analysis of detected power differences, if the difference in power exceeds a predefined threshold, it is identified as a spoofing attempt.
Thus, even in real-world scenarios with interference, the proposed framework can better differentiate between natural channel fading and anomalies caused by spoofing attacks. + +In addition to using the statistical properties of the Rayleigh channel to design PLA against spoofing in environments with multipath fading (such as urban areas), the channel characteristics in suburban environments should also be considered. To address this, the work [77] proposes a PLA framework to counter spoofing attacks in both urban (Rayleigh channel) and suburban (Rician channel) environments. As mentioned earlier (in Section III-B), a new metric AD is devised to distinguish between legitimate signals and spoofing signals based on differences in channel randomness and geographical factors, such as elevation angles and distances. Adopting the unique fading characteristics of Rayleigh and Rician channels makes it statistically difficult for a spoofer to accurately mimic a legitimate signal. By considering elevation angles and distances in channel modeling, it ensures that a spoofer cannot easily replicate a legitimate signal even if the spoofer knows the legitimate transmitter's location. Simulation results show that the probability of a successful spoofing attack is significantly reduced compared to the baseline [171], where the spoofing miss detection probability drops to 0.014 in suburban environments and 0.371 in dense urban areas. + +In the LAENet, in addition to being vulnerable to identity-based spoofing attacks, aircrafts are also susceptible to signal spoofing attacks from the Global Navigation Satellite System (GNSS), particularly GPS spoofing, which poses a significant security threat by generating and transmitting counterfeit satellite signals resulting in severe positioning deviations [25]. 
By interfering with or suppressing legitimate GNSS signals, attackers can manipulate UAV locations in an imperceptible manner to mislead UAVs, causing deviations from intended + +TABLE IX SUMMARY OF SPOOFING DEFENSE FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
TechniquesReferenceAlgorithmPros & Cons
PLA[90]RSSSpatial correlations of RSS distances in PLA against spoofing attacksUse RSS-based channel characteristics to reduce PLA computational complexityAssume an ideal transmission scenario without external interference
[91]Rayleigh channelDefend against spoofing attacks by considering channel randomness and jammingSimultaneously address spoofing and jamming attacks via PLAAssume static UAVs and a known jamming distribution
[77]Rayleigh and Rician channelsAD-based PLA for spoofing defense under Rayleigh and Rician channelsProvide a thorough analysis of spoofer identification in urban and suburban environmentsAssume perfect CSI in channel modeling
GNSS spoofing detection[92]Rule-based detectionCombine cooperative localization mechanism with Stackelberg game against spoofing attacksSpoofing detection is based on neighboring UAV signal sources without predefined thresholdsExtending to larger UAV groups may require complex adjustments
[93]ML-based detectionImproved AdaBoost-CNN for multi-modal spoofing attack identificationHigh accuracy in identifying spoofing attacks with limited data samplesDependence on predefined signal features may lead to model overfitting
[94]Rule & ML-based detectionMLP and statistical feature extraction on path-loss data for detecting GPS spoofingNo additional hardware/energy burden on UAVsRobust performance under sparse base station coverageSpoofing detection performance degrades in areas with unstable cellular signals
+ 

flight paths, violations of no-fly zone regulations, or increased collision risks [46]. Given the critical role of GNSS in UAV operations, effective detection and mitigation strategies for spoofing attacks are essential to ensure flight safety and prevent security breaches in the LAENet. Currently, studies on signal spoofing attack recognition in the LAENet mostly focus on GNSS spoofing attack detection, which primarily falls into two categories: rule-based and ML-based methods [19], [25]. Rule-based detection methods typically assess the relative distance and positional deviations of UAVs to determine if they are under GNSS spoofing attack. On the other hand, the ML-based methods focus on recognizing different spoofing types by learning the characteristics of received signals. 

Generally, the simplified rule-based methods determine whether a UAV has encountered spoofing attacks based on whether its trajectory follows a predictable path [178], [179], since a UAV may exhibit deviations from this path due to the false signals imposed by the spoofer. If the measured deviations exceed predefined thresholds, it indicates a potential spoofing attack. However, relying on predefined thresholds for detecting deviations may not dynamically adapt to the spoofing attacks. The study in [92] proposes a defense mechanism based on cooperative localization, where each UAV uses the relative distances and real locations of neighboring UAVs to detect spoofing attacks. Specifically, each UAV measures its relative distances based on alternative signal sources of neighboring UAVs and compares these results with its own GPS-derived location. If inconsistencies are detected (e.g., the GPS-derived location does not match the majority of the calculated locations), the UAV identifies itself or a neighboring UAV as being under attack.
To optimize defense deployment, an equilibrium of a dynamic Stackelberg game is derived between the drone operator (leader) and the spoofer (follower). Simulation results show that the proposed scheme can effectively prevent spoofer's capture, while random/deterministic baselines suffer from attackers capturing one to two UAVs. + +Recent ML-based methods for spoofing defense primarily focus on binary classification concerning normal GPS signals and spoofing signals [180], [181]. However, they fail to recognize specific types of spoofing attack necessary for countermeasures in complex environments. Hence, there is an urgent need to recognize diverse GPS spoofing attack patterns for effective countermeasures for the LAENet. The authors in [93] propose an improved AdaBoost-CNN algorithm to address the challenge of recognizing diverse GPS spoofing attack patterns for UAVs, as shown in part A of Fig. 9. Three categorized spoofing attack patterns are considered including static and dynamic spoofing based on the UAV's motion state, power-matched and overpowered spoofing based on signal power, and position and time spoofing based on the spoofing targets. The authors select key GPS spoofing signal features such as signal quality monitoring, carrier-to-noise ratio, Doppler shift, and clock error to train the classification model. The improved AdaBoost-CNN algorithm integrates multiple weak CNN classifiers into a strong classification model. Each CNN base classifier uses the updated network parameters from the previous CNN as initial values, enabling iterative refinement of network weights to enhance feature extraction and generalization. With 800 simulated samples, the improved AdaBoost-CNN achieves $100\%$ accuracy, outperforming original AdaBoost-CNN $(94.38\%)$ , CNN $(74.38\%)$ , DNN $(60.94\%)$ , SVM $(40.63\%)$ , and KNN $(53.13\%)$ . 
+ +Furthermore, integrating rule-based approaches with machine learning-based methods provides an effective and robust defense against spoofing attacks. The work in [94] leverages statistical features of path losses between UAVs and terrestrial base stations to detect a UAV's trajectory deviation due to GPS spoofing, as shown in part B of Fig. 9. The spoofing detection is formulated as a nonlinear optimization problem that aims to minimize hypothesis test errors by adjusting thresholds, statistical feature weights, and the number of base stations. To further accurately analyze path loss's statistical features for final decisions on predicting GPS spoofing probabilities, + +TABLE X SUMMARY OF ANOMALY DETECTION FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
Anomaly typeReferenceAlgorithmPros & Cons
Jamming[98]HDBNSA module based on HDBN for detecting jamming anomalies
+✓ Accurately characterize and detect jamming anomalies via KLD/DB metrics
+✗ Unstable initialization in unsupervised learning affects the performance of the HDBN
[99]GDBNGDBN to model the radio environment and detect and classify jamming anomalies +✓ Unsupervised learning eliminates dependency on labeled data in classification of anomalies +✗ Computational complexity increases with the number of jamming categories
[100]Active-GDBNActive-GDBN used to model UAV-jammer interactions for anomaly detection +✓ Actively incorporate UAV's actions for faster adaptation and jamming detection +✗ M-MJPF requires significant computational resources
[101]Blind channel estimation & ACSBlind channel estimation based on ACS properties to detect jammer signals +✓ Does not rely on prior knowledge of the jammer's behavior +✗ Assumes a specific structure of the multicarrier modulation format
Abnormal Power[102]Spectrum surveillanceLocal and cooperative detection of abnormal power emission +✓ Handle both aggressive and passive power misuse +✓ Cloud-based framework enables real-time closed-loop surveillance +✗ Computational complexity increases with the number of SN
Eavesdropping[103]SVM & K-meansOne-class SVM and K-means clustering for detecting eavesdropping anomalies +✓ One-class SVM and K-means are stable under varying eavesdropper power +✗ Detection performance mainly depends on the quality and quantity of the ATD
+ 

multilayer perceptron (MLP) neural networks are deployed on edge cloud servers, where individual MLP models at each BS are used to analyze statistical features of path losses. Simulation results show that the proposed method achieves $97\%$ accuracy with two base stations and $83\%$ accuracy with a single base station, outperforming baseline approaches such as adaptive trustable residence area (ATRA), which necessitates three base stations for triangulation [182]. 

Lesson Learned. For identity spoofing in the LAENet, leveraging signal features such as received signal strength and channel randomness in PLA design is an effective approach [77], [90], [91]. On the other hand, employing rule-based or ML-based techniques can detect and mitigate GNSS signal spoofing [92]–[94]. While ML-based methods show promising performance, they are limited by factors such as computational complexity and dependency on large datasets. Rule-based methods are simpler but may struggle in dynamic or uncertain environments. Future research could explore the application of RL to develop adaptive and robust spoofing defense mechanisms in the LAENet, which has not yet been extensively studied. Different from the abovementioned approaches, RL dynamically learns from interactions with the environment, and its sequential decision-making ability enables UAVs and ground stations to optimize spoofing defense strategies based on continuous feedback [147], making it a promising direction for enhancing spoofing defense in the LAENet. 

# V. COMMUNICATION INTEGRITY FOR LAENET 

# A. Anomaly Detection 

Due to the open nature of wireless channels and the dominant LoS links in the LAENet, communication becomes particularly vulnerable to a diverse range of anomalous behaviors such as abnormal jamming, abnormal transmission power, and covert eavesdropping [46], [49].
Specifically, malicious jammers sense spectrum activity and dynamically adapt their interference patterns to mislead the UAV into taking suboptimal or harmful actions [81], [95]. In parallel, abnormal + +power emissions, either due to device faults, selfish behavior, or malicious intent, can violate spectrum policies, introduce harmful interference, and disrupt cooperative spectrum sharing [96]. Additionally, the pervasive risk of eavesdropping is that adversaries exploit the UAV's uplink or downlink transmissions to intercept sensitive data [61], [67]. Thus, it is essential to detect and mitigate these abnormal activities in the LAENet. Different from previously reviewed approaches such as anti-eavesdropping (Section III-A) and anti-jamming (Section IV-A), anomaly detection is a method used to identify and mitigate unexpected deviations from or irregularities in normal operational patterns by monitoring communication channels in the LAENet [127], [183]. + +Jamming anomalies generally aim to disrupt the normal operation of UAV communication links, such as by injecting disruptive signals to interfere with the legitimate communication process. The study in [98] proposes a novel Self-Awareness (SA) module to leverage the radio to detect abnormal behaviors caused by jamming attacks for Cognitive UAV communications. The SA module unsupervisedly learns a generative model using a Hierarchical Dynamic Bayesian Network (HDBN) [184] to represent the joint distribution of random variables characterizing the radio environment at different levels of abstraction and across time, where the Modified Bayesian Filtering [185] is used to integrate multilevel abnormality measurements for online predictions of radio environmental states at different levels. 
Since jamming can disrupt and shift the distributions of the radio environment, the abnormalities can be detected by calculating the Kullback-Leibler Divergence (KLD) and Bhattacharyya distance (DB) [186] between predictive messages and diagnostic messages. The predictive messages are generated by the HDBN to capture the expected patterns of normal signals, and diagnostic messages reflect the actual state of the signal. The jammer's impact is characterized by calculating generalized errors based on shifts in amplitude, phase, and frequency of signals, allowing the radio to predict future activities of the jammer. The SA

![](images/77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg)
Part A. HDBN-based Scheme

![](images/90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg)
Part B. GDBN-based Scheme

![](images/57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg)
Part C. Active-GDBN-based Scheme
Fig. 10. The overall architecture illustrates jamming anomaly detection to enhance communication integrity in the LAENet. Part A presents an HDBN-based scheme focusing on hierarchical dynamic models to predict and detect abnormal signals caused by jammers. It details the transition probabilities between model states and the prediction of continuous states based on discrete superstates. Part B introduces a GDBN-based scheme, extending the HDBN approach by incorporating generalized states and observations, allowing for more nuanced predictions and error calculations. Part C depicts an Active-GDBN-based scheme, integrating UAV actions into the model to actively infer and adapt to the environment, thereby optimizing resource allocation and anti-jamming measures.

module achieves a near $100\%$ abnormality detection accuracy, approximately $12\%$ higher than the traditional energy detector-based scheme.
+ +Different from the previous work [98], which introduced the SA module using HDBN for anomaly detection, the authors in [99] propose a Generalized Dynamic Bayesian Network (GDBN)-based framework to enhance the SA module by further classifying the detected anomalies caused by multiple jammers. A generalized state-space model [184] is used to represent the evolving radio environment as a GDBN model learned in an unsupervised manner. Different from the KLD/DB metric in [1], Kullback-Leibler divergence and Bhattacharyya distance are used as abnormality measurements between predicted and observed signals to detect jamming. Once an abnormality indicative of jamming is detected, the UAV extracts the interfering signal and compares it with prelearned GDBN models (each representing a different jamming modulation scheme). By evaluating which pre-learned model best explains the extracted jamming signal, the UAV can not only detect the presence of a jammer but also classify its modulation type. Simulation results show that the GDBN-based method achieves an overall classification accuracy of $98\%$ at $\mathrm{SNR} = 10$ dB, outperforming LSTM $(88\%)$ , CNN $(67\%)$ , and SAE $(90\%)$ . + +Based on the study [99], the authors in [100] propose an Active-GDBN to model the dynamic interaction between the UAV and jammer for anomaly detection. Similar to [99], the generalized state-space model [184] is used to capture the features and dynamic evolution of UAV signals to represent the radio environment. Differently from passive detection and classification of jamming signals in [99], the Active-GDBN achieves active anomaly detection by incorporating the UAV's actions into the inference process. Specifically, the UAV employs a Modified Markov Jump Particle Filter (M-MJPF) [187] to link the UAV's actions to environmental + +states and observations. 
Meanwhile, the UAV dynamically adjusts physical resource block selections to evade jamming by encoding jammer behavior and updating beliefs. The Active-GDBN achieves about $25\%$ to $37.5\%$ faster convergence on anomaly detection probability than the Q-learning-based baseline under various jamming types. + +Different from previous works [98]–[100] that detect jamming anomalies based on the statistical distribution divergence of the signal, study [101] focuses on detecting anomalies by exploiting the time delays, shifts, and modulation of the signal characteristics. Firstly, achieving blind channel estimation involves constructing cyclic correlation matrices to identify distinct Doppler shifts and time delays associated with transmissions by exploiting the inherent almost-cyclostationary (ACS) properties of UAV and jammer signals (e.g., periodic statistics from OFDM modulation). Then, this blind estimation process is combined with a widely linear minimum mean square error (WL-MMSE) filter to provide an initial estimate of the symbol vector by leveraging the non-circular statistics of the received signal, where the initial estimate includes contributions from both the UAV and the jammer. Finally, a post-sorting algorithm (PSA) is employed to iteratively decode and cancel the jammer's contribution by ranking and removing symbols with the highest signal-to-disturbance-plus-noise ratio (SDNR). Simulation results demonstrate that the proposed scheme can effectively detect and separate the jamming signals from UAV signals without requiring prior knowledge of the jammer's characteristics, even when the jammer's power is twice as strong as the UAV's power. + +In addition to jamming anomalies, which cause interference and security threats in the LAENet, abnormal power emissions in UAV communication networks also represent a critical type of anomaly, potentially leading to severe disruption of communication quality and violation of spectrum policies. 
The work in [102] proposes a cloud-based surveillance framework + +TABLE XI SUMMARY OF INJECTION DEFENSE FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY. + +
Injection typeReferenceAlgorithmPros & Cons
Jamming signal[98]HDBNHDBN-based jamming signal extraction and suppression +✓ Autonomous learning from raw I/Q data enables adaptability to dynamic jamming patterns +X Assume the jammer's output power remains constant during attacks
[101]SICSIC with blind channel estimation for detecting and eliminating jamming signals +✓ Eliminate jamming signals regardless of the mobility patterns of jammers +X Rely on sufficient cyclostationary features in the received signal
[104]DBFDBF algorithm for nullifying jamming signals +✓ Effective suppression of jamming signals while maintaining carrier phase integrity +X May be limited to specific GNSS frequency bands
Spoofing signal[105], [106]API & LSRSIC combined with API and LSR to recover legitimate signals from spoofing attacks +✓ SemperFi with a single antenna does not require additional hardware +X Limited to attackers with a power advantage lower than 15 dB
[107]Subspace projectionSubspace projection for nullifying spoofing signals +✓ Low parameter dependency, requiring only code delays and carrier frequencies +X Suppression performance declines if spoofing and legitimate signals have similar code delays
+ +to address the detection of abnormal power emissions, where the cloud server assigns spectrum resources to the UAVs and shares UAVs' spectrum usage information with the surveillance center. The surveillance center assigns the detection task to $K$ surveillance nodes (SNs) for local detection of abnormal power emission, where the detection rule is based on the Lagrange multiplier method and the generalized likelihood ratio test. After local decisions, $K$ SNs report results to the surveillance center, where cooperative detection of abnormal power emission is performed using the decision rule that declares an abnormal event when at least $L$ out of $K$ nodes detect an abnormality, where the optimal global threshold of $L$ is determined by solving the constraints on the global false-alarm probabilities. Simulation results show that the global detection probability exceeds 90% when transmit power deviation exceeds 0.02W (allocated power is 0.01W). + +Besides the threats of jamming and abnormal power emission, another critical anomaly that requires detection is eavesdropping in the LAENet, where malicious devices covertly intercept sensitive information during UAV-to-ground and UAV-to-UAV transmissions [66], [67]. Note that most previous works on anti-eavesdropping focused on measuring secure performance through secrecy rate and/or secrecy outage probability (such as [71], [76]) rather than emphasizing the detection of eavesdropping attacks. The work in [103] explores anomaly detection for eavesdropping attacks in UAV-aided wireless systems using unsupervised learning. Two datasets are prepared: artificial training data (ATD), simulated without eavesdropping based on CSI (all labeled normal), and a practical dataset extracted from received signal features (mean and variance of amplitude). Two types of unsupervised learning methods are designed for anomaly detection. 
One-class SVM maps data to a high-dimensional space, defining a normal region where outliers are detected. K-means clustering classifies test data into two clusters, labeling the one nearest to the ATD center as normal. 

Lesson Learned. For jamming anomalies, the statistical distribution divergence detection and signal structural feature-based detection, such as HDBN, GDBN, and ACS, are used to model the dynamic environment and detect deviations from + 

learned normal patterns. For abnormal transmission power detection, a cloud-based surveillance framework supports a statistical distribution detection approach to monitor and identify power emission outliers. Leveraging its high computing power, the cloud enables cooperative analysis through multi-source data aggregation, dynamically optimizes detection thresholds using global information, and maintains a feedback loop for adaptive anomaly detection. For eavesdropping detection, unsupervised learning techniques, including One-Class SVM and K-means clustering, achieve the identification of anomalies in received signals. These approaches effectively achieve anomaly detection and demonstrate excellent performance. However, challenges remain, including the reliance on high-quality training data and the complexity of maintaining real-time adaptability in dynamic spectrum environments. Currently, Generative AI such as GANs and generative diffusion models presents a promising research direction for anomaly detection, as demonstrated in the use of generalized models in HDBN and the artificial data generation for training ML and clustering models in [188], [189]. Generative AI could further enrich training datasets and provide a high-level generative model to enhance anomaly detection in the dynamic and uncertain LAENet. 

# B. 
Injection Defense + +The low-altitude economy is highly dependent on open communication and network architecture with dense communication links, which brings injection attacks as a significant threat to UAV communication integrity [28], [46]. These attacks involve the deliberate injection of malicious signals, such as jamming and spoofing signals, to disrupt or manipulate legitimate communications [97], [190]. Jamming signal injection can make legitimate signals unrecognizable by emitting high-power electromagnetic interference to degrade signal reception [98]. Additionally, spoofing signal injection can transmit high-power signals to overshadow legitimate GNSS signals. Therefore, eliminating injection signals or separating them from legitimate signals is crucial for ensuring communication integrity in the LAENet. + +![](images/0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg) +Part A. SNDR-based SIC for jamming injection defense + +![](images/95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg) +Part B. SIC with API and LSR for spoofing injection defense + +![](images/74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg) +Part C. Subspace projection for spoofing injection defense +Fig. 11. The overall architecture of injection defense mechanisms for UAVs in smart city applications. Part A presents the SIC architecture that processes channel state information to defend against jamming injection attacks [101]. Part B shows an SIC architecture integrated with API and LSR modules, which subtracts injection signals from the received signal to recover normal signals [105], [106]. Part C depicts a subspace projection-based architecture for spoofing injection defense, where the received signal is projected onto the orthogonal null space of the spoofing signals to eliminate them [107]. 
+ +The UAV's communication can be severely disrupted by jammers that exploit LoS propagation to inject jamming signals into the transmission channel, which may effectively mask legitimate signals and render them unrecognizable [101]. The work in [98] proposes an HDBN-based injection defense scheme to extract and remove the jammer's signal. This work first utilizes the HDBN to detect abnormal behaviors caused by jamming attacks, as mentioned earlier in Section V-A. Once the jammer's presence is confirmed, its signal characteristics are analyzed across multiple levels of continuous in-phase (I) and quadrature (Q) components and observation-level state vectors [191]. The extracted jammer signal is then separated from the received observation using frequency-domain subtraction [192], component-wise I/Q processing, and adaptive filtering [191]. The corrected signal is subsequently demodulated and decoded using techniques and error correction coding to restore the original signal. To maintain resilience against evolving jamming tactics, the system continuously updates the HDBN model to improve suppression commands. Simulation results show that the mean square error (MSE) of suppression commands decreases as the JSR increases, meaning that jammers attacking with higher power can be better estimated than jammers attacking with lower power. + +Different from the work in [98], which separates the jamming signal by analyzing its I/Q characteristics, the study in [101] proposes a Serial Interference Cancellation (SIC) scheme based on SDNR to eliminate injected anomalous signals in UAV communications, as shown in part A of Fig. 11. First, blind channel estimation and a WL-MMSE filter are used to identify UAV and jammer signals (as detailed in Section V-A). Then, the PSA ranks detected symbols based on SDNR, where the jamming signals rank higher in SDNR due to their higher emitted power. 
The SIC [193], + +[194] is subsequently designed for progressively eliminating jamming signals. Specifically, the high-rank jamming symbol is decoded, reconstructed using estimated channel parameters, and subtracted from the received signal. The process continues iteratively to eliminate previously detected jamming signals until all UAV symbols are successfully recovered, with the receiver dynamically updating channel estimation to adapt to jammer mobility and environmental changes. Simulation results demonstrate that the UAV signal can be recovered with low bit error rates $(< 10^{-4})$ even when the power of the jammer is double that of the UAV. + +Jamming attacks not only affect U2G and UAV-to-UAV communications but also cause RF interference, leading to UAVs failing to track GNSS signals in the LAENet. In light of this, the work in [104] proposes a self-calibrating digital beamforming (DBF) algorithm to effectively nullify jamming signals while preserving high-precision carrier phase measurements. It calibrates the antenna array's steering vectors and RF channel characteristics. Once calibration is complete, the system performs jamming detection and direction estimation by analyzing interference patterns across the antenna array. Then, the minimum power distortionless response (MPDR) optimization rule is used to calculate optimal beamforming weights, which aim to create nulls in the beam pattern corresponding to the directions of jamming signals, effectively suppressing them. The calculated beamforming weights are applied to the received signals to produce the beamformer output, which effectively suppresses jamming signals while preserving the carrier phase integrity of the desired signals. The proposed scheme achieves up to 80 dB Jammer-to-Signal Ratio (JSR) suppression, significantly outperforming the conventional Power Inversion (PI) scheme. 
+ +In addition to jamming signals, spoofing attacks can easily + +transmit fake signals to manipulate GNSS signals due to their open signal structure and weak signal strength [195]. One type of method is based on signal encryption or data encryption to prevent malicious spoofers from injecting illegitimate signals [196]–[198]. However, they may not be suitable for resource-constrained UAVs in the LAENet. Therefore, defending against spoofing signal injection based on different signal characteristics is a promising solution. The authors in [105], [106] propose an anti-spoofing system, called SemperFi, to autonomously recover legitimate signals during active spoofing for UAVs. The system employs two core modules: the Adversarial Peak Identifier (API) and the Legitimate Signal Retriever (LSR), as shown in part B of Fig. 11. The API detects spoofed signals by correlating inertial measurement unit (IMU) data with calculated position-velocity-time (PVT) solutions [199]. The LSR module replicates the spoofing signal once it is identified. Then, similar to the study in [101], the SIC is applied to subtract the replica from the composite received signal that contains legitimate and spoofing signals. SemperFi enters an iterative refinement process if spoofing signals persist after initial cancellation, where replication, subtraction, and reassessment are performed until the spoofing detector no longer triggers an alarm, indicating sufficient attenuation or elimination of spoofing. + +Besides recovering legitimate signals by subtracting spoofing signals from the received signal [101], [105], [106], projecting the signal is also a viable injection defense strategy. In the study [107], the GNSS receiver's spoofing mitigation algorithm employs a subspace projection-based interference cancellation method to effectively eliminate spoofing signals, as shown in part C of Fig. 11. 
Specifically, the receiver on UAVs acquires and tracks incoming signals, identifying potential spoofing signals and reconstructing them based on their power levels, pseudo-random noise (PRN) code delays, and carrier frequencies. Then, the receiver uses these reconstructed spoofing signals to construct a spoofing subspace, which represents all possible linear combinations of spoofing signal characteristics. To effectively remove spoofing signals from the received signal, the receiver performs orthogonal projection to obtain a cleaned signal by mapping the received signal onto a complementary null space that is mathematically orthogonal to the spoofing subspace. Simulation results show that shorter projection lengths suppress spoofing signals more effectively than longer projections, achieving a $20\mathrm{dB}$ gain in Signal-to-Interference Ratio (SIR). + +Lesson Learned: The above-mentioned studies have demonstrated the effectiveness of mitigating injection signals, such as jamming and spoofing attacks, thereby enhancing UAV communication reliability and security. These advancements leverage techniques that not only detect malicious signal interference but also enable autonomous recovery. One key advantage is that non-cooperative detection techniques, such as blind estimation [101] and self-awareness models [98], allow for efficient attack identification without requiring prior knowledge of the attacker's signal characteristics to adapt to dynamic and adversarial environments. However, several challenges remain, as beamforming-based or spatial filtering techniques rely on multi-antenna configurations [101], [104], limiting their applicability in cost-sensitive or small UAV systems. Future work should explore lightweight and energy-efficient implementations of injection defense to support stable UAV signal integrity protection. 
Additionally, more intelligent injection defense strategies combining optimization methods, RL, and ML could enhance resilience against more sophisticated adversaries. + +# VI. FUTURE RESEARCH DIRECTIONS + +# A. Energy-efficient Physical Layer Security + +Future work can focus on exploring more unique physical characteristics of wireless communication, such as exploiting channel characteristics and implementing simple coding schemes, to develop secure and low-energy protocols. Meanwhile, drones in the LAENet need to develop adaptive power control strategies that dynamically adjust transmission power based on channel conditions and security requirements to minimize unnecessary energy consumption [200]. Moreover, dynamic trajectory optimization is equally important for energy efficiency [201]. Future research can explore enabling UAVs to learn attack patterns in real time, share secure trajectory models across swarms, and dynamically adjust flight paths based on real-time security and power consumption feedback. + +# B. Multi-drone Collaboration for Secure Communication + +Future research on secure physical layer communication in the LAENet should move beyond existing dual-UAV collaboration models and explore distributed multi-UAV coordination (or UAV swarms) for enhanced resilience against jamming, spoofing, and unauthorized access [202]. For example, UAV swarms can collaboratively emit interference signals to obscure unauthorized receivers, thereby enhancing the confidentiality of communications [20]. Additionally, the integration of adaptive trust-based mutual authentication protocols among UAVs is essential [26]. Multiple UAVs with mutually verified identities can enable dynamic and secure spectrum-sharing mechanisms to optimize resource utilization in the LAENet. + +# C. AI-driven Security Defense Strategy + +Existing AI-based security strategies mainly focus on training AI models to identify anomalous signals while having some limitations. 
The resource-constrained drones are unable to train high-quality AI models, making the integration of edge computing a promising approach for model training [200]. Note that AI models may be difficult to generalize in recognizing various anomalous signals because they are pre-trained on previously collected datasets of fixed size. Future work can explore leveraging GAN or diffusion models to generate datasets based on real-time captured anomalous signals [203]. Furthermore, emerging generative AI technologies, such as the diffusion model for secure network topology generation in low-altitude domains [189], [204], AI agents for human-aerial vehicle secure interaction [205], and mixture of experts for robust wireless communications [2], [206], can be explored to achieve a more autonomous and intelligent LAENet. + +# D. Space-Air-Ground Integrated Security Architecture + +Future research can explore establishing a multi-domain physical layer security framework for LAENet to connect space, air, and ground layers, providing seamless communication coverage and cost-effective network access [55], [207]. A potential key research direction is the development of a coordinated multi-tier security mechanism, where satellites, UAVs, and terrestrial base stations collaboratively enhance physical layer security through dynamic resource allocation and interference management based on real-time CSI and environmental conditions, such as UAV mobility, channel fading, and spectrum constraints. + +# E. 6G-Enabled Secure UAV Communication + +The advent of 6G networks presents new opportunities for LAENet. Terahertz (THz) communication can offer ultrahigh-speed data transmission capabilities for LAENet [208]. Future research can explore the integration of THz with advanced beamforming techniques to focus signals on legitimate users, thereby enhancing security and reducing the risk of interception. 
Furthermore, Reconfigurable Intelligent Surfaces (RIS) play a crucial role in strengthening physical layer security by intelligently controlling wireless signal propagation [209], [210]. Future work can investigate RIS-based secure beamforming strategies to mitigate adversary interception, and leverage optimization techniques and DRL to adaptively adjust beamforming against eavesdropping or jamming attacks. + +# VII. CONCLUSION + +This paper has presented a comprehensive survey on secure physical layer communications in the LAENet, emphasizing the importance of safeguarding confidentiality, availability, and integrity in communications. It introduced the concept and architecture of the LAENet and outlined the associated security issues in physical layer communication. Then, the survey provided in-depth reviews of countermeasures for anti-eavesdropping strategies, authentication schemes, anti-jamming strategies, spoofing defenses, anomaly detection, and injection defense. Finally, the paper proposed a set of forward-looking future research directions. These discussions highlighted the critical role of secure physical layer communication in supporting the development of the LAENet and offered valuable insights for ongoing advancements in this emerging domain. + +# REFERENCES + +[1] Z. Li, Z. Gao, K. Wang, Y. Mei, C. Zhu, L. Chen, X. Wu, and D. Niyato, "Unauthorized uav countermeasure for low-altitude economy: Joint communications and jamming based on mimo cellular systems," IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6659-6672, 2025. +[2] C. Zhao, J. Wang, R. Zhang, D. Niyato, G. Sun, H. Du, D. I. Kim, and A. Jamalipour, "Generative ai-enabled wireless communications for robust low-altitude economy networking," arXiv preprint arXiv:2502.18118, 2025. +[3] H. A. H. Alobaidy, R. Nordin, M. J. Singh, N. F. Abdullah, A. Haniz, K. Ishizu, T. Matsumura, F. Kojima, and N. 
Ramli, "Low-altitude-platform-based airborne IoT network (lap-ain) for water quality monitoring in harsh tropical environment," IEEE Internet of Things Journal, vol. 9, no. 20, pp. 20034-20054, 2022. + +[4] China holds central economic work conference to plan for 2024. Accessed: Dec. 12, 2023. [Online]. Available: https://english.www.gov.cn/news/202312/12/content_WS657860aecd0868f4e8e21c2.html +[5] J. Qiu, D. Grace, G. Ding, M. D. Zakaria, and Q. Wu, "Air-ground heterogeneous networks for 5g and beyond via integrating high and low altitude platforms," IEEE Wireless Communications, vol. 26, no. 6, pp. 140-148, 2019. +[6] H. Ahmadinejad and A. Falahati, "Forming a two-tier heterogeneous air-network via combination of high and low altitude platforms," IEEE Transactions on Vehicular Technology, vol. 71, no. 2, pp. 1989-2001, 2022. +[7] N. Hossein Motlagh, T. Taleb, and O. Arouk, "Low-altitude unmanned aerial vehicles-based internet of things services: Comprehensive survey and future perspectives," IEEE Internet of Things Journal, vol. 3, no. 6, pp. 899-922, 2016. +[8] H. Yang, M. Zheng, Z. Shao, Y. Jiang, and Z. Xiong, "Intelligent computation offloading and trajectory planning for 3d target search in low-altitude economy scenarios," IEEE Wireless Communications Letters, pp. 1-1, 2025. +[9] R. Shakeri, M. A. Al-Garadi, A. Badawy, A. Mohamed, T. Khattab, A. K. Al-Ali, K. A. Harras, and M. Guizani, "Design challenges of multi-uav systems in cyber-physical applications: A comprehensive survey and future directions," IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3340-3385, 2019. +[10] Y. Zhang, X. Gao, N. Ye, D. Niyato, Z. Han, and K. Yang, "Joint uav deployment, power allocation, and coalition formation for physical layer security in heterogeneous networks," IEEE Transactions on Vehicular Technology, pp. 1-15, 2025. +[11] Z. Liu, Y. Cao, P. Gao, X. Hua, D. Zhang, and T. 
Jiang, "Multi-uav network assisted intelligent edge computing: Challenges and opportunities," China Communications, vol. 19, no. 3, pp. 258-278, 2022. +[12] Y. Liu, X. Gong, J. Chen, S. Chen, and Y. Yang, "Rotation-invariant siamese network for low-altitude remote-sensing image registration," IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 13, pp. 5746-5758, 2020. +[13] G. Cheng, X. Song, Z. Lyu, and J. Xu, "Networked isac for low-altitude economy: Coordinated transmit beamforming and UAV trajectory design," IEEE Transactions on Communications, pp. 1-1, 2025. +[14] G. Cheng, X. Song, Z. Lyu, and J. Xu, “Networked isac for low-altitude economy: Transmit beamforming and uav trajectory design,” in 2024 IEEE/CIC International Conference on Communications in China (ICCC), 2024, pp. 78-83. +[15] X. Zheng, G. Sun, J. Li, J. Wang, Q. Wu, D. Niyato, and A. Jamalipour, "Uav swarm-enabled collaborative post-disaster communications in low altitude economy via a two-stage optimization approach," arXiv preprint arXiv:2501.05742, 2025. +[16] China's low-altitude economy soars at high speed. Accessed: Dec. 19, 2024. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html +[17] China's low-altitude economy takes flight: A new engine for innovation-driven growth. Accessed: Mar. 17, 2025. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html +[18] Flying air taxis move closer to us takeoff with issuing of FAA rule. Accessed: Oct. 22, 2024. [Online]. Available: https://www.usnews.com/news/business/articles/2024-10-22/flying-air-taxis-move-closer-to-us-takeoff-with-issuing-of-faa-rule +[19] A. Rugo, C. A. Ardagna, and N. E. Ioini, “A security review in the uavnet era: Threats, countermeasures, and gap analysis,” ACM Comput. Surv., vol. 55, no. 1, Jan. 2022. [Online]. Available: https://doi.org/10.1145/3485272 +[20] X. Wang, Z. Zhao, L. Yi, Z. Ning, L. Guo, F. R. 
Yu, and S. Guo, "A survey on security of uav swarm networks: Attacks and countermeasures," ACM Comput. Surv., vol. 57, no. 3, Nov. 2024. [Online]. Available: https://doi.org/10.1145/3703625 +[21] O. Ceviz, S. Sen, and P. Sadioglu, “A survey of security in uavs and fanets: issues, threats, analysis of attacks, and solutions,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024. +[22] H. J. Hadi, Y. Cao, K. U. Nisa, A. M. Jamil, and Q. Ni, "A comprehensive survey on security, privacy issues and emerging defence technologies for uavs," Journal of Network and Computer Applications, vol. 213, p. 103607, 2023. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1084804523000267 +[23] V. Hassija, V. Chamola, A. Agrawal, A. Goyal, N. C. Luong, D. Niyato, F. R. Yu, and M. Guizani, "Fast, reliable, and secure drone communication: A comprehensive survey," IEEE Communications Surveys & Tutorials, vol. 23, no. 4, pp. 2802-2832, 2021. + +[24] B. Zolfaghari, M. Abbasmollaei, F. Hajizadeh, N. Yanai, and K. Bibak, "Secure uav (drone) and the great promise of ai," ACM Comput. Surv., vol. 56, no. 11, Jul. 2024. [Online]. Available: https://doi.org/10.1145/3673225 +[25] X. Wei, J. Ma, and C. Sun, “A survey on security of unmanned aerial vehicle systems: Attacks and countermeasures,” IEEE Internet of Things Journal, vol. 11, no. 21, pp. 34826-34847, 2024. +[26] M. Adil, M. A. Jan, Y. Liu, H. Abulkasim, A. Farouk, and H. Song, "A systematic survey: Security threats to UAV-aided IoT applications, taxonomy, current challenges and requirements with future research directions," IEEE Transactions on Intelligent Transportation Systems, vol. 24, no. 2, pp. 1437-1455, 2023. +[27] N. Kumar and A. Chaudhary, "Surveying cybersecurity vulnerabilities and countermeasures for enhancing uav security," Computer Networks, vol. 252, p. 110695, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128624005279 +[28] J. Wang, X. Wang, R. Gao, C. Lei, W. 
Feng, N. Ge, S. Jin, and T. Q. S. Quek, “Physical layer security for uav communications: A comprehensive survey,” China Communications, vol. 19, no. 9, pp. 77–115, 2022. +[29] A. Fotouhi, H. Qiang, M. Ding, M. Hassan, L. G. Giordano, A. Garcia-Rodriguez, and J. Yuan, "Survey on uav cellular communications: Practical aspects, standardization advancements, regulation, and security challenges," IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3417-3442, 2019. +[30] M. Adil, H. Song, S. Mastorakis, H. Abulkasim, A. Farouk, and Z. Jin, "Uav-assisted IoT applications, cybersecurity threats, ai-enabled solutions, open challenges with future research directions," IEEE Transactions on Intelligent Vehicles, vol. 9, no. 4, pp. 4583-4605, 2024. +[31] W. U. Khan, E. Lagunas, Z. Ali, M. A. Javed, M. Ahmed, S. Chatzinotas, B. Ottersten, and P. Popovski, "Opportunities for physical layer security in uav communication enhanced with intelligent reflective surfaces," IEEE Wireless Communications, vol. 29, no. 6, pp. 22-28, 2022. +[32] J. Wang, H. Du, D. Niyato, M. Zhou, J. Kang, and H. Vincent Poor, "Acceleration estimation of signal propagation path length changes for wireless sensing," IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11476-11492, 2024. +[33] T. Wang, C.-K. Wen, H. Wang, F. Gao, T. Jiang, and S. Jin, "Deep learning for wireless physical layer: Opportunities and challenges," China Communications, vol. 14, no. 11, pp. 92-111, 2017. +[34] J. Wang, H. Du, D. Niyato, J. Kang, S. Cui, X. Shen, and P. Zhang, "Generative ai for integrated sensing and communication: Insights from the physical layer perspective," IEEE Wireless Communications, vol. 31, no. 5, pp. 246-255, 2024. +[35] S. Li, L. Xiao, Y. Liu, G. Liu, P. Xiao, and T. Jiang, "Performance analysis for orthogonal time frequency space modulation systems with generalized waveform," China Communications, vol. 20, no. 4, pp. 57-72, 2023. +[36] N. Xie, W. Xiong, M. Sha, T. Hu, P. Zhang, L. 
Huang, and D. Niyato, "Physical layer authentication with high compatibility using an encoding approach," IEEE Transactions on Communications, vol. 70, no. 12, pp. 8270-8285, 2022. +[37] S. Liu, T. Wang, and S. Wang, "Toward intelligent wireless communications: Deep learning - based physical layer technologies," Digital Communications and Networks, vol. 7, no. 4, pp. 589-597, 2021. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2352864821000742 +[38] Y. Zhang, Y. Peng, X. Tang, L. Xiao, and T. Jiang, "Large-scale fading decoding aided user-centric cell-free massive mimo: Uplink error probability analysis and detector design," IEEE Transactions on Wireless Communications, vol. 23, no. 8, pp. 10336-10349, 2024. +[39] H. Du, J. Wang, D. Niyato, J. Kang, Z. Xiong, J. Zhang, and X. Shen, "Semantic communications for wireless sensing: Ris-aided encoding and self-supervised decoding," IEEE Journal on Selected Areas in Communications, vol. 41, no. 8, pp. 2547-2562, 2023. +[40] P. Yang, X. Xi, K. Guo, T. Q. S. Quek, J. Chen, and X. Cao, "Proactive uav network slicing for urllc and mobile broadband service multiplexing," IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3225-3244, 2021. +[41] J. Huang, A. Wang, G. Sun, J. Li, J. Wang, H. Du, and D. Niyato, "Dual uav cluster-assisted maritime physical layer secure communications via collaborative beamforming," IEEE Internet of Things Journal, pp. 1-1, 2024. +[42] Z. Duan, Z. Chang, N. Xie, W. Sun, and D. T. Niyato, "Adaptive strategies in enhancing physical layer security: A comprehensive + +survey," ACM Comput. Surv., vol. 57, no. 7, Feb. 2025. [Online]. Available: https://doi.org/10.1145/3715319 +[43] Q. Wang, Z. Chen, W. Mei, and J. Fang, "Improving physical layer security using uav-enabled mobile relaying," IEEE Wireless Communications Letters, vol. 6, no. 3, pp. 310-313, 2017. +[44] S. Liu, H. Yang, M. Zheng, L. Xiao, Z. Xiong, and D. 
Niyato, “Uav-enabled semantic communication in mobile edge computing under jamming attacks: An intelligent resource management approach,” IEEE Transactions on Wireless Communications, vol. 23, no. 11, pp. 17 493–17 507, 2024. +[45] S. Bi, K. Li, S. Hu, W. Ni, C. Wang, and X. Wang, “Detection and mitigation of position spoofing attacks on cooperative uav swarm formations,” IEEE Transactions on Information Forensics and Security, vol. 19, pp. 1883–1895, 2024. +[46] X. Sun, D. W. K. Ng, Z. Ding, Y. Xu, and Z. Zhong, "Physical layer security in uav systems: Challenges and opportunities," IEEE Wireless Communications, vol. 26, no. 5, pp. 40-47, 2019. +[47] G. Zhang, Q. Hu, Y. Zhang, Y. Dai, and T. Jiang, "Lightweight cross-domain authentication scheme for securing wireless IoT devices using backscatter communication," IEEE Internet of Things Journal, vol. 11, no. 12, pp. 22021-22035, 2024. +[48] Q. Wu, W. Mei, and R. Zhang, "Safeguarding wireless network with uavs: A physical layer security perspective," IEEE Wireless Communications, vol. 26, no. 5, pp. 12-18, 2019. +[49] H.-M. Wang, X. Zhang, and J.-C. Jiang, “Uav-involved wireless physical-layer secure communications: Overview and research directions,” IEEE Wireless Communications, vol. 26, no. 5, pp. 32-39, 2019. +[50] B. Li, Z. Fei, Y. Zhang, and M. Guizani, "Secure uav communication networks over 5g," IEEE Wireless Communications, vol. 26, no. 5, pp. 114-120, 2019. +[51] L. Bai, L. Zhu, J. Liu, J. Choi, and W. Zhang, "Physical layer authentication in wireless communication networks: A survey," Journal of Communications and Information Networks, vol. 5, no. 3, pp. 237-264, 2020. +[52] N. Xie, Z. Li, and H. Tan, "A survey of physical-layer authentication in wireless communications," IEEE Communications Surveys & Tutorials, vol. 23, no. 1, pp. 282-310, 2021. +[53] Y. Xu, T. Zhang, D. Yang, Y. Liu, and M. 
Tao, "Joint resource and trajectory optimization for security in uav-assisted mec systems," IEEE Transactions on Communications, vol. 69, no. 1, pp. 573-588, 2021. +[54] Y. Zhang, Z. Kuang, Y. Feng, and F. Hou, "Task offloading and trajectory optimization for secure communications in dynamic user multi-uav mec systems," IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 14427-14440, 2024. +[55] Y. Zhang, X. Gao, H. Yuan, K. Yang, J. Kang, P. Wang, and D. Niyato, "Joint uav trajectory and power allocation with hybrid fso/rf for secure space-air-ground communications," IEEE Internet of Things Journal, vol. 11, no. 19, pp. 31407-31421, 2024. +[56] W. Wang, X. Li, R. Wang, K. Cumanan, W. Feng, Z. Ding, and O. A. Dobre, "Robust 3d-trajectory and time switching optimization for dual-uav-enabled secure communications," IEEE Journal on Selected Areas in Communications, vol. 39, no. 11, pp. 3334-3347, 2021. +[57] C. Wen, L. Qiu, and X. Liang, "Securing uav communication with mobile uav eavesdroppers: Joint trajectory and communication design," in 2021 IEEE Wireless Communications and Networking Conference (WCNC), 2021, pp. 1-6. +[58] W. Lu, Y. Ding, Y. Gao, S. Hu, Y. Wu, N. Zhao, and Y. Gong, "Resource and trajectory optimization for secure communications in dual unmanned aerial vehicle mobile edge computing systems," IEEE Transactions on Industrial Informatics, vol. 18, no. 4, pp. 2704-2713, 2022. +[59] F. Lu, G. Liu, W. Lu, Y. Gao, J. Cao, N. Zhao, and A. Nallanathan, "Resource and trajectory optimization for uav-relay-assisted secure maritime mec," IEEE Transactions on Communications, vol. 72, no. 3, pp. 1641-1652, 2024. +[60] A. S. Abdalla, A. Behfarnia, and V. Marojevic, "Uav trajectory and multi-user beamforming optimization for clustered users against passive eavesdropping attacks with unknown csi," IEEE Transactions on Vehicular Technology, vol. 72, no. 11, pp. 14426-14442, 2023. +[61] Y. Ding, H. Han, W. Lu, Y. Wang, N. Zhao, X. Wang, and X. 
Yang, "Ddqn-based trajectory and resource optimization for uav-aided mec secure communications," IEEE Transactions on Vehicular Technology, vol. 73, no. 4, pp. 6006-6011, 2024. +[62] H. Kang, X. Chang, J. Mišić, V. B. Mišić, J. Fan, and J. Bai, “Improving dual-uav aided ground-uav bi-directional communication security: Joint uav trajectory and transmit power optimization,” IEEE Transactions on Vehicular Technology, vol. 71, no. 10, pp. 10570–10583, 2022. + +[63] Y. Zhang, Z. Mou, F. Gao, J. Jiang, R. Ding, and Z. Han, "Uav-enabled secure communications by multi-agent deep reinforcement learning," IEEE Transactions on Vehicular Technology, vol. 69, no. 10, pp. 11599-11611, 2020. +[64] Y. Liu, C. Huang, G. Chen, R. Song, S. Song, and P. Xiao, “Deep learning empowered trajectory and passive beamforming design in uav-ris enabled secure cognitive non-terrestrial networks,” IEEE Wireless Communications Letters, vol. 13, no. 1, pp. 188–192, 2024. +[65] J. Wang, R. Wang, Z. Zheng, R. Lin, L. Wu, and F. Shu, "Physical layer security enhancement in uav-assisted cooperative jamming for cognitive radio networks: A mappo-lstm deep reinforcement learning approach," IEEE Transactions on Vehicular Technology, pp. 1-14, 2024. +[66] X. Tang, N. Liu, R. Zhang, and Z. Han, "Deep learning-assisted secure uav-relaying networks with channel uncertainties," IEEE Transactions on Vehicular Technology, vol. 71, no. 5, pp. 5048-5059, 2022. +[67] X. Li, R. Yao, Y. Fan, P. Wang, and J. Xu, "Secure efficiency map-enabled uav trajectory planning," IEEE Wireless Communications Letters, vol. 12, no. 8, pp. 1324-1328, 2023. +[68] R. Karmakar, G. Kaddoum, and O. Akhrif, “A novel federated learning-based smart power and 3d trajectory control for fairness optimization in secure uav-assisted mec services,” IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 4832–4848, 2024. +[69] Z. Li, X. Liao, J. Shi, L. Li, and P. 
Xiao, “Md-gan-based uav trajectory and power optimization for cognitive covert communications,” IEEE Internet of Things Journal, vol. 9, no. 12, pp. 10187-10199, 2022. +[70] S. Jia, L. Xiaomeng, L. Xiaomin, T. Zhuangzhuang, and H. Junfan, "Covert leo satellite communication aided by generative adversarial network based cooperative uav jamming," China Communications, vol. 21, no. 9, pp. 27-39, 2024. +[71] C. Zhang, G. Sun, J. Li, Q. Wu, J. Wang, D. Niyato, and Y. Liu, "Multi-objective aerial collaborative secure communication optimization via generative diffusion model-enabled deep reinforcement learning," IEEE Transactions on Mobile Computing, pp. 1-18, 2024. +[72] T. Alladi, Naren, G. Bansal, V. Chamola, and M. Guizani, "Secauthuav: A novel authentication scheme for uav-ground station and uav-uav communication," IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 15068-15077, 2020. +[73] R. Karmakar, G. Kaddoum, and O. Akhrif, "A puf and fuzzy extractor-based uav-ground station and uav-uav authentication mechanism with intelligent adaptation of secure sessions," IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 3858-3875, 2024. +[74] M. Tanveer, A. Aldosary, S.-u.-d. Khokhar, A. K. Das, S. A. Aldossari, and S. A. Chaudhry, “Paf-iod: Puf-enabled authentication framework for the internet of drones,” IEEE Transactions on Vehicular Technology, vol. 73, no. 7, pp. 9560–9574, 2024. +[75] S. J. Maeng, Y. Yapici, i. Guvenc, A. Bhuyan, and H. Dai, “Precoder design for physical-layer security and authentication in massive mimo uav communications,” IEEE Transactions on Vehicular Technology, vol. 71, no. 3, pp. 2949–2964, 2022. +[76] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, B. Vucetic, and P. Fan, "A uav-aided physical layer authentication based on channel characteristics and geographical locations," IEEE Transactions on Vehicular Technology, vol. 73, no. 1, pp. 1053–1064, 2024. +[77] Y. Zhou, Y. Wang, Z. Ma, P. Fan, and M. 
Xiao, "Physical layer authentication for uav communications under rayleigh and rician channels," IEEE Transactions on Wireless Communications, pp. 1-1, 2025. +[78] Y.-S. Shiu, S. Y. Chang, H.-C. Wu, S. C.-H. Huang, and H.-H. Chen, "Physical layer security in wireless networks: a tutorial," IEEE Wireless Communications, vol. 18, no. 2, pp. 66-74, 2011. +[79] J. Xu, D. Li, Z. Zhu, Z. Yang, N. Zhao, and D. Niyato, “Anti-jamming design for integrated sensing and communication via aerial iris,” IEEE Transactions on Communications, vol. 72, no. 8, pp. 4607–4619, 2024. +[80] B. Duo, Q. Wu, X. Yuan, and R. Zhang, “Anti-jamming 3d trajectory design for uav-enabled wireless sensor networks under probabilistic loss channel,” IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 16288-16293, 2020. +[81] Y. Wu, W. Yang, X. Guan, and Q. Wu, "Energy-efficient trajectory design for uav-enabled communication under malicious jamming," IEEE Wireless Communications Letters, vol. 10, no. 2, pp. 206-210, 2021. +[82] Y. Wu, W. Yang, X. Guan, and Q. Wu, "Uav-enabled relay communication under malicious jamming: Joint trajectory and transmit power optimization," IEEE Transactions on Vehicular Technology, vol. 70, no. 8, pp. 8275-8279, 2021. + +[83] M. A. Aref, S. K. Jayaweera, and S. Machuzak, "Multi-agent reinforcement learning based cognitive anti-jamming," in 2017 IEEE Wireless Communications and Networking Conference (WCNC), 2017, pp. 1-6. +[84] L. Jia, F. Yao, Y. Sun, Y. Xu, S. Feng, and A. Anpalagan, “A hierarchical learning solution for anti-jamming stackelberg game with discrete power strategies,” IEEE Wireless Communications Letters, vol. 6, no. 6, pp. 818–821, 2017. +[85] X. Liu, Y. Xu, L. Jia, Q. Wu, and A. Anpalagan, “Anti-jamming communications using spectrum waterfall: A deep reinforcement learning approach,” IEEE Communications Letters, vol. 22, no. 5, pp. 998–1001, 2018. +[86] H. Yang, Z. Xiong, J. Zhao, D. Niyato, Q. Wu, H. V. Poor, and M. 
Tornatore, "Intelligent reflecting surface assisted anti-jamming communications: A fast reinforcement learning approach," IEEE Transactions on Wireless Communications, vol. 20, no. 3, pp. 1963-1974, 2021. +[87] Z. Yin, Y. Lin, Y. Zhang, Y. Qian, F. Shu, and J. Li, "Collaborative multiagent reinforcement learning aided resource allocation for uav anti-jamming communication," IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23995-24008, 2022. +[88] Y. Ma, K. Liu, Y. Liu, X. Wang, and Z. Zhao, "An intelligent game-based anti-jamming solution using adversarial populations for aerial communication networks," IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2024. +[89] Z. Shao, H. Yang, L. Xiao, W. Su, Y. Chen, and Z. Xiong, "Deep reinforcement learning-based resource management for uav-assisted mobile edge computing against jamming," IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 13358-13374, 2024. +[90] Y. Zhou, P. L. Yeoh, K. J. Kim, Z. Ma, Y. Li, and B. Vucetic, "Game theoretic physical layer authentication for spoofing detection in uav communications," IEEE Transactions on Vehicular Technology, vol. 71, no. 6, pp. 6750-6755, 2022. +[91] Q. Cheng, Y. Zhou, H. Liu, L. Yang, Z. Ma, and P. Fan, "Physical layer authentication in uav communications with channel randomness and jamming uncertainty," IEEE Transactions on Vehicular Technology, pp. 1-6, 2025. +[92] A. Eldosouky, A. Ferdowsi, and W. Saad, “Drones in distress: A game-theoretic countermeasure for protecting uavs against gps spoofing,” IEEE Internet of Things Journal, vol. 7, no. 4, pp. 2840–2854, 2020. +[93] D. She, W. Wang, Z. Yin, J. Wang, and H. Shan, "Gps spoofing attack recognition for uavs with limited samples," IEEE Internet of Things Journal, vol. 12, no. 1, pp. 250-261, 2025. +[94] Y. Dang, C. Benzaid, B. Yang, T. Taleb, and Y. Shen, "Deep-ensemble-learning-based gps spoofing detection for cellular-connected uavs," IEEE Internet of Things Journal, vol. 9, no. 
24, pp. 25068-25085, 2022. +[95] X. Wang, J. Wang, Y. Xu, J. Chen, L. Jia, X. Liu, and Y. Yang, "Dynamic spectrum anti-jamming communications: Challenges and opportunities," IEEE Communications Magazine, vol. 58, no. 2, pp. 79-85, 2020. +[96] L. Zhang, G. Ding, Q. Wu, and Z. Han, "Spectrum sensing under spectrum misuse behaviors: A multi-hypothesis test perspective," IEEE Transactions on Information Forensics and Security, vol. 13, no. 4, pp. 993-1007, 2018. +[97] S. C. Hassler, U. A. Mughal, and M. Ismail, “Cyber-physical intrusion detection system for unmanned aerial vehicles,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 6, pp. 6106–6117, 2024. +[98] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, "An emergent self-awareness module for physical layer security in cognitive uav radios," IEEE Transactions on Cognitive Communications and Networking, vol. 8, no. 2, pp. 888-906, 2022. +[99] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, "Automatic jamming signal classification in cognitive uav radios," IEEE Transactions on Vehicular Technology, vol. 71, no. 12, pp. 12972-12988, 2022. +[100] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, “A novel resource allocation for anti-jamming in cognitive-uavs: An active inference approach,” IEEE Communications Letters, vol. 26, no. 10, pp. 2272–2276, 2022. +[101] D. Darsena, G. Gelli, I. Iudice, and F. Verde, “Detection and blind channel estimation for uav-aided wireless sensor networks in smart cities under mobile jamming attack,” IEEE Internet of Things Journal, vol. 9, no. 14, pp. 11932–11950, 2022. +[102] L. Zhang, G. Ding, Q. Wu, and P. Liu, “Detection of abnormal power emission in uav communication networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1179–1182, 2019. + +[103] T. M. Hoang, N. M. Nguyen, and T. Q. 
Duong, “Detection of eavesdropping attack in uav-aided wireless systems: Unsupervised learning with one-class svm and k-means clustering,” IEEE Wireless Communications Letters, vol. 9, no. 2, pp. 139–142, 2020. +[104] Y. An, R. Kang, Y. Ban, and S. Yang, “Beidou receiver based on anti-jamming antenna arrays with self-calibration for precise relative positioning,” Journal of Systems Engineering and Electronics, vol. 35, no. 5, pp. 1132–1147, 2024. +[105] H. Sathaye and A. Ranganathan, “Semperfi: a spoofer eliminating standalone gps receiver,” in Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks, ser. WiSec '20. New York, NY, USA: Association for Computing Machinery, 2020, p. 353–355. [Online]. Available: https://doi.org/10.1145/3395351.3401703 +[106] H. Sathaye, G. LaMountain, P. Closas, and A. Ranganathan, “Semperfi: Anti-spoofing gps receiver for uavs,” in Network and Distributed Systems Security (NDSS) Symposium 2022, 2022. +[107] S. Han, L. Chen, W. Meng, and C. Li, "Improve the security of gnss receivers through spoofing mitigation," IEEE Access, vol. 5, pp. 21057-21069, 2017. +[108] X. Ye, Y. Mao, X. Yu, S. Sun, L. Fu, and J. Xu, "Integrated sensing and communications for low-altitude economy: A deep reinforcement learning approach," arXiv preprint arXiv:2412.04074, 2024. +[109] C. Huang, S. Fang, H. Wu, Y. Wang, and Y. Yang, "Low-altitude intelligent transportation: System architecture, infrastructure, and key technologies," Journal of Industrial Information Integration, vol. 42, p. 100694, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2452414X24001377 +[110] Y. Yang, Y. Chen, J. Wang, G. Sun, and D. Niyato, "Embodied ai-empowered low altitude economy: Integrated sensing, communications, computation, and control (isc3)," arXiv preprint arXiv:2412.19996, 2024. +[111] J. Li, G. Sun, Q. Wu, S. Liang, J. Wang, D. Niyato, and D. I. 
Kim, "Aerial secure collaborative communications under eavesdropper collusion in low-altitude economy: A generative swarm intelligent approach," arXiv preprint arXiv:2503.00721, 2025. +[112] G. Sun, W. Xie, D. Niyato, H. Du, J. Kang, J. Wu, S. Sun, and P. Zhang, "Generative ai for advanced uav networking," IEEE Network, pp. 1-1, 2024. +[113] X. Tang, X. Li, R. Yu, Y. Wu, J. Ye, F. Tang, and Q. Chen, "Digital-twin-assisted task assignment in multi-uav systems: A deep reinforcement learning approach," IEEE Internet of Things Journal, vol. 10, no. 17, pp. 15362-15375, 2023. +[114] X. Tang, Q. Chen, R. Yu, and X. Li, "Digital twin-empowered task assignment in aerial mec network: A resource coalition cooperation approach with generative model," IEEE Transactions on Network Science and Engineering, vol. 12, no. 1, pp. 13-27, 2025. +[115] Y. Jiang, X. Li, G. Zhu, H. Li, J. Deng, and Q. Shi, "6g non-terrestrial networks enabled low-altitude economy: Opportunities and challenges," ArXiv, vol. abs/2311.09047, 2023. [Online]. Available: https://api_semanticscholar.org/CorpusID:265213350 +[116] X. Luo, Y. Zhang, Z. He, G. Yang, and Z. Ji, "A two-step environment-learning-based method for optimal uav deployment," IEEE Access, vol. 7, pp. 149328-149340, 2019. +[117] X. Tang, Q. Chen, W. Weng, B. Liao, J. Wang, X. Cao, and X. Li, "Dnn task assignment in uav networks: A generative ai enhanced multi-agent reinforcement learning approach," IEEE Internet of Things Journal, pp. 1-1, 2025. +[118] H. Yang, J. Zhao, Z. Xiong, K.-Y. Lam, S. Sun, and L. Xiao, "Privacy-preserving federated learning for uav-enabled networks: Learning-based joint scheduling and resource management," IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3144-3159, 2021. +[119] X. Cai, T. Izydorczyk, J. Rodríguez-Pineiro, I. Z. Kovács, J. Wigard, F. M. L. Tavares, and P. E. 
Mogensen, "Empirical low-altitude air-to-ground spatial channel characterization for cellular networks connectivity," IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 2975-2991, 2021. +[120] Y. Zhao, F. Zhou, L. Feng, W. Li, Y. Sun, and M. A. Imran, "Backhaul-constrained coverage analysis of integrated high and low altitude platforms aerial communication system in post-disaster areas," IEEE Communications Letters, vol. 27, no. 6, pp. 1629-1633, 2023. +[121] S. H. Alsamhi, F. A. Almalki, F. Afghah, A. Hawbani, A. V. Shvetsov, B. Lee, and H. Song, "Drones' edge intelligence over smart environments in b5g: Blockchain and federated learning synergy," IEEE Transactions on Green Communications and Networking, vol. 6, no. 1, pp. 295-312, 2022. + +[122] A. Ahmad, A. A. Cheema, and D. Finlay, "A survey of radio propagation channel modelling for low altitude flying base stations," Computer Networks, vol. 171, p. 107122, 2020. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128619310692 +[123] I. Bozcan and E. Kayacan, "Context-dependent anomaly detection for low altitude traffic surveillance," in 2021 IEEE International Conference on Robotics and Automation (ICRA), 2021, pp. 224-230. +[124] Y. Liu, X. Gong, and Y. Yang, "A multilayer fusion network with rotation-invariant and dynamic feature representation for multiview low-altitude image registration," IEEE Geoscience and Remote Sensing Letters, vol. 18, no. 6, pp. 1019-1023, 2021. +[125] A. Omri and M. O. Hasna, "Physical layer security analysis of uav based communication networks," in 2018 IEEE 88th Vehicular Technology Conference (VTC-Fall), 2018, pp. 1-6. +[126] S. Samonas and D. Coss, “The cia strikes back: Redefining confidentiality, integrity and availability in security.” Journal of Information System Security, vol. 10, no. 3, 2014. +[127] C. Zhao, H. Du, D. Niyato, J. Kang, Z. Xiong, D. I. Kim, X. Shen, and K. B. 
Letaief, "Generative ai for secure physical layer communications: A survey," IEEE Transactions on Cognitive Communications and Networking, vol. 11, no. 1, pp. 3-26, 2025. +[128] J. M. Hamamreh, H. M. Furqan, and H. Arslan, "Classifications and applications of physical layer security techniques for confidentiality: A comprehensive survey," IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1773-1828, 2019. +[129] M. Shakiba-Herfeh, A. Chorti, and H. Vincent Poor, “Physical layer security: Authentication, integrity, and confidentiality,” Physical layer security, pp. 129–150, 2021. +[130] S. Hu, Q. Wu, and X. Wang, "Energy management and trajectory optimization for uav-enabled legitimate monitoring systems," IEEE Transactions on Wireless Communications, vol. 20, no. 1, pp. 142-155, 2021. +[131] D. Wang, B. Bai, W. Zhao, and Z. Han, “A survey of optimization approaches for wireless physical layer security,” IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1878–1911, 2019. +[132] M. A. Arfaoui, M. D. Soltani, I. Tavakkolnia, A. Ghrayeb, M. Safari, C. M. Assi, and H. Haas, "Physical layer security for visible light communication systems: A survey," IEEE Communications Surveys & Tutorials, vol. 22, no. 3, pp. 1887-1908, 2020. +[133] Z. Yin, M. Jia, N. Cheng, W. Wang, F. Lyu, Q. Guo, and X. Shen, "Uav-assisted physical layer security in multi-beam satellite-enabled vehicle communications," IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 3, pp. 2739-2751, 2022. +[134] X. Fang, N. Zhang, S. Zhang, D. Chen, X. Sha, and X. Shen, "On physical layer security: Weighted fractional fourier transform based user cooperation," IEEE Transactions on Wireless Communications, vol. 16, no. 8, pp. 5498-5510, 2017. +[135] W. Tian, X. Ding, G. Liu, Y. Dai, and Z. 
Han, “A uav-assisted secure communication system by jointly optimizing transmit power and trajectory in the internet of things,” IEEE Transactions on Green Communications and Networking, vol. 7, no. 4, pp. 2025–2037, 2023. +[136] F. Irram, M. Ali, M. Naeem, and S. Mumtaz, "Physical layer security for beyond 5g/6g networks: Emerging technologies and future directions," Journal of Network and Computer Applications, vol. 206, p. 103431, 2022. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S108480452200087X +[137] W. Lu, P. Si, F. Lu, B. Li, Z. Liu, S. Hu, and Y. Gong, "Resource and trajectory optimization in uav-powered wireless communication system," Science China Information Sciences, vol. 64, no. 4, p. 140304, Mar 2021, accessed: 2025-01-03. [Online]. Available: https://doi.org/10.1007/s11432-020-3060-4 +[138] J. Luo, Z. Wang, M. Xia, L. Wu, Y. Tian, and Y. Chen, "Path planning for uav communication networks: Related technologies, solutions, and opportunities," ACM Comput. Surv., vol. 55, no. 9, Jan. 2023. [Online]. Available: https://doi-org.remotexs.ntu.edu.sg/10.1145/3560261 +[139] A. V. Savkin, H. Huang, and W. Ni, “Securing uav communication in the presence of stationary or mobile eavesdroppers via online 3d trajectory planning,” IEEE Wireless Communications Letters, vol. 9, no. 8, pp. 1211–1215, 2020. +[140] X. Zhou, Q. Wu, S. Yan, F. Shu, and J. Li, "Uav-enabled secure communications: Joint trajectory and transmit power optimization," IEEE Transactions on Vehicular Technology, vol. 68, no. 4, pp. 4069-4073, 2019. +[141] R. Ding, F. Gao, and X. S. Shen, "3d uav trajectory design and frequency band allocation for energy-efficient and fair communication: A deep reinforcement learning approach," IEEE Transactions on Wireless Communications, vol. 19, no. 12, pp. 7796-7809, 2020. + +[142] C. Zhong, J. Yao, and J. Xu, "Secure uav communication with cooperative jamming and trajectory control," IEEE Communications Letters, vol. 23, no. 2, pp. 
286-289, 2019. +[143] Y. Bai, H. Zhao, X. Zhang, Z. Chang, R. Jantti, and K. Yang, "Toward autonomous multi-uav wireless network: A survey of reinforcement learning-based approaches," IEEE Communications Surveys & Tutorials, vol. 25, no. 4, pp. 3038-3067, 2023. +[144] R. Dong, B. Wang, K. Cao, J. Tian, and T. Cheng, "Secure transmission design of ris enabled uav communication networks exploiting deep reinforcement learning," IEEE Transactions on Vehicular Technology, vol. 73, no. 6, pp. 8404-8419, 2024. +[145] X. Tang, T. Jiang, J. Liu, B. Li, D. Zhai, F. R. Yu, and Z. Han, "Secure communication with uav-enabled aerial ris: Learning trajectory with reflection optimization," IEEE Transactions on Intelligent Vehicles, pp. 1-10, 2023. +[146] J. Duan, Y. Guan, S. E. Li, Y. Ren, Q. Sun, and B. Cheng, "Distribu-tional soft actor-critic: Off-policy reinforcement learning for addressing value estimation errors," IEEE Transactions on Neural Networks and Learning Systems, vol. 33, no. 11, pp. 6584-6598, 2022. +[147] W. Chen, X. Qiu, T. Cai, H.-N. Dai, Z. Zheng, and Y. Zhang, “Deep reinforcement learning for internet of things: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 23, no. 3, pp. 1659–1692, 2021. +[148] F. Tang, H. Hofner, N. Kato, K. Kaneko, Y. Yamashita, and M. Hangai, “A deep reinforcement learning-based dynamic traffic offloading in space-air-ground integrated networks (sagin),” IEEE Journal on Selected Areas in Communications, vol. 40, no. 1, pp. 276–289, 2022. +[149] N. Yang, S. Chen, H. Zhang, and R. Berry, “Beyond the edge: An advanced exploration of reinforcement learning for mobile edge computing, its applications, and future research trajectories,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024. +[150] Q. Mao, F. Hu, and Q. Hao, “Deep learning for intelligent wireless networks: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 20, no. 4, pp. 2595–2621, 2018. +[151] P. Consul, I. Budhiraja, and D. 
Garg, "A hybrid secure resource allocation and trajectory optimization approach for mobile edge computing using federated learning based on web 3.0," IEEE Transactions on Consumer Electronics, vol. 70, no. 1, pp. 1167-1179, 2024. +[152] X. Hou, J. Wang, Z. Zhang, J. Wang, L. Liu, and Y. Ren, "Split federated learning for uav-enabled integrated sensing, computation, and communication," arXiv preprint arXiv:2504.01443, 2025. +[153] K. Heo, W. Lee, and K. Lee, “Uav-assisted wireless-powered secure communications: Integration of optimization and deep learning,” IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 10530–10545, 2024. +[154] U. A. Mughal, Y. Alkhrijah, A. Almadhor, and C. Yuen, “Deep learning for secure uav-assisted ris communication networks,” IEEE Internet of Things Magazine, vol. 7, no. 2, pp. 38-44, 2024. +[155] R. Dong, B. Wang, and K. Cao, "Deep learning driven 3d robust beamforming for secure communication of uav systems," IEEE Wireless Communications Letters, vol. 10, no. 8, pp. 1643-1647, 2021. +[156] M. Chen, U. Challita, W. Saad, C. Yin, and M. Debbah, "Artificial neural networks-based machine learning for wireless networks: A tutorial," IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3039-3071, 2019. +[157] M. T. Nguyen and L. B. Le, “Multi-uav trajectory control, resource allocation, and nomai user pairing for uplink energy minimization,” IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23 728–23 740, 2022. +[158] X. Liao, J. Shi, Z. Li, L. Zhang, and B. Xia, “A model-driven deep reinforcement learning heuristic algorithm for resource allocation in ultra-dense cellular networks,” IEEE Transactions on Vehicular Technology, vol. 69, no. 1, pp. 983–997, 2020. +[159] X. Liao, J. Si, J. Shi, Z. Li, and H. Ding, "Generative adversarial network assisted power allocation for cooperative cognitive covert communication system," IEEE Communications Letters, vol. 24, no. 7, pp. 1463-1467, 2020. +[160] Y. Zhou, P. L. Yeoh, H. 
Chen, Y. Li, R. Schober, L. Zhuo, and B. Vucetic, "Improving physical layer security via a uav friendly jammer for unknown eavesdropper location," IEEE Transactions on Vehicular Technology, vol. 67, no. 11, pp. 11280-11284, 2018. +[161] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P.-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, vol. 36, no. 7, pp. 2814–2830, 2024. +[162] D. Chen, N. Zhang, N. Cheng, K. Zhang, Z. Qin, and X. Shen, "Physical layer based message authentication with secure channel + +codes," IEEE Transactions on Dependable and Secure Computing, vol. 17, no. 5, pp. 1079-1093, 2020. +[163] G. Bansal and B. Sikdar, “S-maps: Scalable mutual authentication protocol for dynamic uav swarms,” IEEE Transactions on Vehicular Technology, vol. 70, no. 11, pp. 12088-12100, 2021. +[164] B. Chatterjee, D. Das, S. Maity, and S. Sen, "Rf-puf: Enhancing iot security through authentication of wireless nodes using in-situ machine learning," IEEE Internet of Things Journal, vol. 6, no. 1, pp. 388-398, 2019. +[165] G. Bansal, N. Naren, V. Chamola, B. Sikdar, N. Kumar, and M. Guizani, "Lightweight mutual authentication protocol for v2g using physical unclonable function," IEEE Transactions on Vehicular Technology, vol. 69, no. 7, pp. 7234-7246, 2020. +[166] C. Pu, A. Wall, K.-K. R. Choo, I. Ahmed, and S. Lim, "A lightweight and privacy-preserving mutual authentication and key agreement protocol for internet of drones environment," IEEE Internet of Things Journal, vol. 9, no. 12, pp. 9918-9933, 2022. +[167] Z. Zhang, C. Hsu, M. H. Au, L. Harn, J. Cui, Z. Xia, and Z. Zhao, "Prlap-iod: A puf-based robust and lightweight authentication protocol for internet of drones," Computer Networks, vol. 238, p. 110118, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128623005637 +[168] J. Liu and X. 
Wang, "Physical layer authentication enhancement using two-dimensional channel quantization," IEEE Transactions on Wireless Communications, vol. 15, no. 6, pp. 4171-4182, 2016. +[169] X. Lu, J. Lei, Y. Shi, and W. Li, "Improved physical layer authentication scheme based on wireless channel phase," IEEE Wireless Communications Letters, vol. 11, no. 1, pp. 198-202, 2022. +[170] N. Xie, J. Chen, and L. Huang, “Physical-layer authentication using multiple channel-based features,” IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2356-2366, 2021. +[171] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, and B. Vucetic, "Signal-to-noise ratio based physical layer authentication in uav communications," in 2023 IEEE 34th Annual International Symposium on Personal, Indoor and Mobile Radio Communications (PIMRC), 2023, pp. 1-6. +[172] Y. Shang, Y. Peng, R. Ye, and J. Lee, “Ris-assisted secure uav communication scheme against active jamming and passive eavesdropping,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 11, pp. 16953-16963, 2024. +[173] Y. Wu, X. Guan, W. Yang, and Q. Wu, “Uav swarm communication under malicious jamming: Joint trajectory and clustering design,” IEEE Wireless Communications Letters, vol. 10, no. 10, pp. 2264–2268, 2021. +[174] Z. Shen, K. Xu, and X. Xia, "Beam-domain anti-jamming transmission for downlink massive mimo systems: A stackelberg game perspective," IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2727-2742, 2021. +[175] X. Li, J. Chen, X. Ling, and T. Wu, “Deep reinforcement learning-based anti-jamming algorithm using dual action network,” IEEE Transactions on Wireless Communications, vol. 22, no. 7, pp. 4625–4637, 2023. +[176] L. Jia, N. Qi, F. Chu, S. Fang, X. Wang, S. Ma, and S. Feng, "Game-theoretic learning anti-jamming approaches in wireless networks," IEEE Communications Magazine, vol. 60, no. 5, pp. 60-66, 2022. +[177] F. Yao and L. 
Jia, “A collaborative multi-agent reinforcement learning anti-jamming algorithm in wireless networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1024–1027, 2019. +[178] E. Schmidt, N. Gatsis, and D. Akopian, “A gps spoofing detection and classification correlator-based technique using the lasso,” IEEE Transactions on Aerospace and Electronic Systems, vol. 56, no. 6, pp. 4224–4237, 2020. +[179] B. Pardhasaradhi and L. R. Cenkeramaddi, "Gps spoofing detection and mitigation for drones using distributed radar tracking and fusion," IEEE Sensors Journal, vol. 22, no. 11, pp. 11 122-11 134, 2022. +[180] Z. Chen, J. Li, J. Li, X. Zhu, and C. Li, "Gnss multiparameter spoofing detection method based on support vector machine," IEEE Sensors Journal, vol. 22, no. 18, pp. 17864-17874, 2022. +[181] X. Chen, D. He, X. Yan, W. Yu, and T.-K. Truong, "Gnss interference type recognition with fingerprint spectrum dnn method," IEEE Transactions on Aerospace and Electronic Systems, vol. 58, no. 5, pp. 4745-4760, 2022. +[182] Y. Dang, C. Benzaïd, Y. Shen, and T. Taleb, "Gps spoofing detector with adaptive trustable residence area for cellular based-uavs," in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-6. +[183] V. Chandola, A. Banerjee, and V. Kumar, "Anomaly detection: A survey," ACM Comput. Surv., vol. 41, no. 3, Jul. 2009. [Online]. Available: https://doi.org/10.1145/1541880.1541882 + +[184] B. Balaji and K. Friston, "Bayesian state estimation using generalized coordinates," Signal processing, sensor fusion, and target recognition XX, vol. 8050, pp. 716-727, 2011. +[185] M. Baydoun, D. Campo, V. Sanguineti, L. Marcenaro, A. Cavallaro, and C. Regazzoni, “Learning switching models for abnormality detection for autonomous driving,” in 2018 21st International Conference on Information Fusion (FUSION), 2018, pp. 2606–2613. +[186] L. Pardo, Statistical inference based on divergence measures. Chapman and Hall/CRC, 2018. +[187] A. Krayani, M. 
Baydoun, L. Marcenaro, A. S. Alam, and C. Regazzoni, "Self-learning bayesian generative models for jammer detection in cognitive-uav-radios," in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-7. +[188] W. Xie, G. Sun, J. Wang, H. Du, J. Kang, K. Huang, and V. Leung, “Multi-objective aerial iris-assisted isac optimization via generative ai-enhanced deep reinforcement learning,” arXiv preprint arXiv:2502.10687, 2025. +[189] J. Wang, H. Du, Y. Liu, G. Sun, D. Niyato, S. Mao, D. I. Kim, and X. Shen, "Generative ai based secure wireless sensing for isac networks," arXiv preprint arXiv:2408.11398, 2024. +[190] X. Wang, C. P. Tan, Y. Wang, and X. Wang, “Defending uav networks against covert attacks using auxiliary signal injections,” IEEE Transactions on Automation Science and Engineering, pp. 1–13, 2024. +[191] M. Valkama, M. Renfors, and V. Koivunen, “Advanced methods for i/q imbalance compensation in communication receivers,” IEEE Transactions on Signal Processing, vol. 49, no. 10, pp. 2335–2344, 2001. +[192] J. Zhang and Y. R. Zheng, "Frequency-domain turbo equalization with soft successive interference cancellation for single carrier mimo underwater acoustic communications," IEEE Transactions on Wireless Communications, vol. 10, no. 9, pp. 2872-2882, 2011. +[193] P. Madhani, P. Axelrad, K. Krumvieda, and J. Thomas, "Application of successive interference cancellation to the gps pseudolite near-far problem," IEEE Transactions on Aerospace and Electronic Systems, vol. 39, no. 2, pp. 481-488, 2003. +[194] P. Patel and J. Holtzman, "Analysis of a simple successive interference cancellation scheme in a ds/cdma system," IEEE Journal on Selected Areas in Communications, vol. 12, no. 5, pp. 796-807, 1994. +[195] M. L. Psiaki and T. E. Humphreys, “Gnss spoofing and detection,” Proceedings of the IEEE, vol. 104, no. 6, pp. 1258–1270, 2016. +[196] T. E. 
Humphreys, “Detection strategy for cryptographic gnss anti-spoofing,” IEEE Transactions on Aerospace and Electronic Systems, vol. 49, no. 2, pp. 1073–1090, 2013. +[197] Z. Wu, R. Liu, and H. Cao, "Ecdsa-based message authentication scheme for beidou-ii navigation satellite system," IEEE Transactions on Aerospace and Electronic Systems, vol. 55, no. 4, pp. 1666-1682, 2019. +[198] K. Wesson, M. Rothlisberger, and T. Humphreys, “Practical cryptographic civilgps signal authentication,” NAVIGATION: Journal of the Institute of Navigation, vol. 59, no. 3, pp. 177–193, 2012. +[199] A. Ranganathan, H. Olafsdóttir, and S. Capkun, "Spree: a spoofing resistant gps receiver," in Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking, ser. MobiCom '16. New York, NY, USA: Association for Computing Machinery, 2016, p. 348-360. [Online]. Available: https://doi.org/10.1145/2973750.2973753 +[200] M. Ahmed, A. A. Soofi, S. Raza, F. Khan, S. Ahmad, W. U. Khan, M. Asif, F. Xu, and Z. Han, “Advancements in ris-assisted UAV for empowering multiaccess edge computing: A survey,” IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6325–6346, 2025. +[201] G. K. Pandey, D. S. Gurjar, S. Yadav, Y. Jiang, and C. Yuen, “Uav-assisted communications with rf energy harvesting: A comprehensive survey,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024. +[202] P. Cao, L. Lei, S. Cai, G. Shen, X. Liu, X. Wang, L. Zhang, L. Zhou, and M. Guizani, "Computational intelligence algorithms for uav swarm networking and collaboration: A comprehensive survey and future directions," IEEE Communications Surveys & Tutorials, vol. 26, no. 4, pp. 2684-2728, 2024. +[203] P. Li, H. Zhang, Y. Wu, L. Qian, R. Yu, D. Niyato, and X. Shen, "Filling the missing: Exploring generative ai for enhanced federated learning over heterogeneous mobile edge devices," IEEE Transactions on Mobile Computing, vol. 23, no. 10, pp. 10001-10015, 2024. +[204] J. Wang, Y. Liu, H. Du, D. Niyato, J. 
Kang, H. Zhou, and D. I. Kim, "Empowering wireless networks with artificial intelligence generated graph," arXiv preprint arXiv:2405.04907, 2024. + +[205] M. Xu, D. Niyato, J. Kang, Z. Xiong, S. Mao, Z. Han, D. I. Kim, and K. B. Letaief, "When large language model agents meet 6g networks: Perception, grounding, and alignment," IEEE Wireless Communications, vol. 31, no. 6, pp. 63-71, 2024. +[206] R. Zhang, H. Du, D. Niyato, J. Kang, Z. Xiong, P. Zhang, and D. I. Kim, "Optimizing generative ai networking: A dual perspective with multi-agent systems and mixture of experts," arXiv preprint arXiv:2405.12472, 2024. +[207] A. H. Arani, P. Hu, and Y. Zhu, “Uav-assisted space-air-ground integrated networks: A technical review of recent learning algorithms,” IEEE Open Journal of Vehicular Technology, vol. 5, pp. 1004–1023, 2024. +[208] N. T. T. Van, N. L. Tuan, N. C. Luong, T. H. Nguyen, S. Feng, S. Gong, D. Niyato, and D. I. Kim, "Network access selection for urclc and embb applications in sub-6ghz-mmwave-thz networks: Game theory versus multi-agent reinforcement learning," IEEE Transactions on Communications, pp. 1-1, 2024. +[209] Q. Yuan, L. Xiao, C. He, P. Xiao, and T. Jiang, "Deep learning-based hybrid precoding for ris-aided broadband terahertz communication systems in the face of beam squint," IEEE Wireless Communications Letters, vol. 13, no. 2, pp. 303-307, 2024. +[210] G. Geraci, A. Garcia-Rodriguez, M. M. Azari, A. Lozano, M. Mezzavilla, S. Chatzinotas, Y. Chen, S. Rangan, and M. D. Renzo, "What will the future of uav cellular communications be? a flight from 5g to 6g," IEEE Communications Surveys & Tutorials, vol. 24, no. 3, pp. 1304-1335, 2022. 
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09153/images/0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg b/data/2025/2504_09xxx/2504.09153/images/0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..226d6b15846202927814609dbb01d2e37b16dbf4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33a1ac96921fef927feed21b8f88663720d62838cda7bdd99774d2055f23f1d +size 52471 diff --git a/data/2025/2504_09xxx/2504.09153/images/12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg b/data/2025/2504_09xxx/2504.09153/images/12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0589cd6212e683936b7dece5c8b5229787c5d926 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c19f208b30e286c7e3268cb6f621b9a11c7dd77bf5af53b9339ae32bb8901344 +size 20207 diff --git a/data/2025/2504_09xxx/2504.09153/images/139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg b/data/2025/2504_09xxx/2504.09153/images/139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bcfd0347481d03955e223ae2e6db03e44c5c340 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb586ff6e1df143c9b02a48de171b47b89ae1b0ff8ff1c707069a4933ad2f2ca +size 14675 diff --git 
a/data/2025/2504_09xxx/2504.09153/images/1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg b/data/2025/2504_09xxx/2504.09153/images/1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f3de85f6c59eff610e70cf3bc089bfa764ec1c1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6416c7df8aef51e19be6bcdf0db53237841ae4aa3debd6a54bb7475ff451fa3e +size 193262 diff --git a/data/2025/2504_09xxx/2504.09153/images/1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg b/data/2025/2504_09xxx/2504.09153/images/1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg new file mode 100644 index 0000000000000000000000000000000000000000..80150540c6a87ba4c1a3869c23fec9072feb1f7d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7dbcf9d32bf913d47321681841bbec7bb1c36ee5414255e4cdea7c5c61d3bad +size 106517 diff --git a/data/2025/2504_09xxx/2504.09153/images/299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg b/data/2025/2504_09xxx/2504.09153/images/299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e36bab38683f3b0e14c4cebc12aae735e863e5a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b8efb57e45e523416fb12e3f3980f1298f29fd0086d2bbf6414419fbe0aab06 +size 32521 diff --git a/data/2025/2504_09xxx/2504.09153/images/2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg 
b/data/2025/2504_09xxx/2504.09153/images/2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2170fbb10de012d1e98c6bce050c987e5cbbd864 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7897ad832aa7f880e06eda1a9470b77cfc036fdf8bfedbeeb4858e8f931ce05a +size 33288 diff --git a/data/2025/2504_09xxx/2504.09153/images/34be38f581f31f563210090b46c7023435f7f21295c66f10111880e33a75aa87.jpg b/data/2025/2504_09xxx/2504.09153/images/34be38f581f31f563210090b46c7023435f7f21295c66f10111880e33a75aa87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d08e0a92495755617e3cfd96578d1d4292eb9354 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/34be38f581f31f563210090b46c7023435f7f21295c66f10111880e33a75aa87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d23590128fc9ce438c7724e7d92618e502e31f9240830a857e97369782797d8 +size 175114 diff --git a/data/2025/2504_09xxx/2504.09153/images/35f27e74f7c34c861495fa1c76195ab51abea9e536ca4fcaa9d1d7ca44689523.jpg b/data/2025/2504_09xxx/2504.09153/images/35f27e74f7c34c861495fa1c76195ab51abea9e536ca4fcaa9d1d7ca44689523.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5157fce6147e21886a81fe1656084c830ebc3f9a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/35f27e74f7c34c861495fa1c76195ab51abea9e536ca4fcaa9d1d7ca44689523.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2015cd996375c7ec2c181cb9b048ffa97eae311e3c80df9b1867e07d45685903 +size 164416 diff --git a/data/2025/2504_09xxx/2504.09153/images/44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg b/data/2025/2504_09xxx/2504.09153/images/44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a94d85fdc085b8a601461b1627db9a4f5b0ca6bb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:289cfa017e9508731c1ec41d7ff289dcbeb8f64826d0fbfc6fc633f991b92fe7 +size 21527 diff --git a/data/2025/2504_09xxx/2504.09153/images/455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg b/data/2025/2504_09xxx/2504.09153/images/455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75eb60a6b10ee1515fe0f30d1b9d4e9746b389c3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d66bada95eb13653ece55ba37f507ecd5736521295e11c0e6090c6b9112a3e7b +size 34232 diff --git a/data/2025/2504_09xxx/2504.09153/images/45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg b/data/2025/2504_09xxx/2504.09153/images/45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2275767bbb1d3e2c59dea6e7c3b2226bba4b15d3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88b585508fe0fed6e8c51a56f4467ad92e5f370c6b3f86159df67c0701ffee81 +size 37752 diff --git a/data/2025/2504_09xxx/2504.09153/images/4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg b/data/2025/2504_09xxx/2504.09153/images/4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg new file mode 100644 index 0000000000000000000000000000000000000000..935381376f89b15847128a6144bd68a484f9e502 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09153/images/4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:426c470b681d33b5628bac5615a5b28006dfd2616a2b778b263d45168748f03d +size 16964 diff --git a/data/2025/2504_09xxx/2504.09153/images/50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg b/data/2025/2504_09xxx/2504.09153/images/50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg new file mode 100644 index 0000000000000000000000000000000000000000..656e461d4a0b41b9c4760c6464fde17fcb2ed951 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aee2dca47f641296a0e537c537b830ab5d9fb09079b3d7c0a0203d617ce4a809 +size 22662 diff --git a/data/2025/2504_09xxx/2504.09153/images/57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg b/data/2025/2504_09xxx/2504.09153/images/57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b8430fe5c3a2e09c6b315f98d8525fb88e2717ad --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639548ff970b84e62f54ab9170368ad8492e87cb6a85a20666b9b7ad350a6eb4 +size 53983 diff --git a/data/2025/2504_09xxx/2504.09153/images/59767b138893846c4828b5215fcedab577ef94ef050884296e36a852a110260c.jpg b/data/2025/2504_09xxx/2504.09153/images/59767b138893846c4828b5215fcedab577ef94ef050884296e36a852a110260c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5612a1024ce2fd932c2db5e83a2cb9d884f9cd4b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/59767b138893846c4828b5215fcedab577ef94ef050884296e36a852a110260c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e457d9e8e604f443d5e7772f5ab26fd0058f10d181df5c333a25929325a41cef +size 117199 diff --git a/data/2025/2504_09xxx/2504.09153/images/736a80a4e6b5887a9f24b64146c7c6443017316dd655d4bae306fadd65bc5feb.jpg b/data/2025/2504_09xxx/2504.09153/images/736a80a4e6b5887a9f24b64146c7c6443017316dd655d4bae306fadd65bc5feb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23cc8ff4f3c769e31caafb0ae2b6425867b24fbc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/736a80a4e6b5887a9f24b64146c7c6443017316dd655d4bae306fadd65bc5feb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0aeb9766b8f9c527caf8c2d49c2ac8103479fe0c121825af880e8f4f0823664 +size 162611 diff --git a/data/2025/2504_09xxx/2504.09153/images/74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg b/data/2025/2504_09xxx/2504.09153/images/74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d6a5048c1379483866814a5d9849caadf2b715c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0ac31f70bb6469151f5b97f1ff72fa37717cd2e36c0f4d8a956a7690234c3de +size 15133 diff --git a/data/2025/2504_09xxx/2504.09153/images/7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg b/data/2025/2504_09xxx/2504.09153/images/7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f9408dae5d1a7b00bdf859e2d60298543a00f1d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4053273b7d280e4ffd19a40074916aa556a40778934be1226ea247b8033e8fdd +size 42723 diff --git 
a/data/2025/2504_09xxx/2504.09153/images/77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg b/data/2025/2504_09xxx/2504.09153/images/77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a663e460dfa0e1962e743f0437b3e0ab8733518a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea1e40bd0df05a495f69d645a52b895896820d0cb38d2fd0ec06592e6895c8b +size 36991 diff --git a/data/2025/2504_09xxx/2504.09153/images/7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg b/data/2025/2504_09xxx/2504.09153/images/7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05fc7aa0733c73df103e496d04a428edb258c176 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ea748bcaf039165b2976921a5eb33f6a70ad801d6df8962468c9ace2398ab20 +size 26083 diff --git a/data/2025/2504_09xxx/2504.09153/images/90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg b/data/2025/2504_09xxx/2504.09153/images/90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1da8755ea3fea422bffa20dcc62e2d0673cc2f5d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b90450d993bd909d26eaa2bb2189398294c87d54663004a7dbb6030d6a5759f +size 18761 diff --git a/data/2025/2504_09xxx/2504.09153/images/95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg 
b/data/2025/2504_09xxx/2504.09153/images/95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d99fb2ad4ab8e564725d8299d71317a23936bc1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76a67af89d0eb16d04d40e17039d6883ee1cb8890a2d8b12728f85f98ab80c8d +size 30120 diff --git a/data/2025/2504_09xxx/2504.09153/images/95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg b/data/2025/2504_09xxx/2504.09153/images/95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dee51275e26c45a6f081bdbd9b1f614b1fe47071 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe73050ae19b421cf2f9f8a038e4f006da2ae5406222a36b063cc33508afeb44 +size 35494 diff --git a/data/2025/2504_09xxx/2504.09153/images/967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg b/data/2025/2504_09xxx/2504.09153/images/967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff9998bd080cbcbf587df627e50bd433bc325fb6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5458c44ce98273c47e63dff8cc95cefba6d3841475b887e23341eda238a9c1f6 +size 33797 diff --git a/data/2025/2504_09xxx/2504.09153/images/a098c7255a5103f909ef1e57c6ad04b2fe83cf8cff00c95130989f6352c468f0.jpg b/data/2025/2504_09xxx/2504.09153/images/a098c7255a5103f909ef1e57c6ad04b2fe83cf8cff00c95130989f6352c468f0.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..ced6d221749c5ff269173e254a837b4e51fad000 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/a098c7255a5103f909ef1e57c6ad04b2fe83cf8cff00c95130989f6352c468f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07ae8f43b6b6c7891254e90a58c2aba720eb5ceb89d50c071b2da1742c397389 +size 164281 diff --git a/data/2025/2504_09xxx/2504.09153/images/ac755b49caad673c5969aa85578ccfc3367cd5587e9664d857f416a9905f81b7.jpg b/data/2025/2504_09xxx/2504.09153/images/ac755b49caad673c5969aa85578ccfc3367cd5587e9664d857f416a9905f81b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efa4ef430dab0820e1c85f906125a61942815530 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/ac755b49caad673c5969aa85578ccfc3367cd5587e9664d857f416a9905f81b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94350294b2a36ccb1643ae16a0078dbf83d5b60b3343cad6605bbdf56f70c9fd +size 157220 diff --git a/data/2025/2504_09xxx/2504.09153/images/aefb9e00909efa991b8f89e18e40ebc9e14f812ef0f371b6668cba7d6f813151.jpg b/data/2025/2504_09xxx/2504.09153/images/aefb9e00909efa991b8f89e18e40ebc9e14f812ef0f371b6668cba7d6f813151.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54cfd0d147f94a9a368d1fdb8d4793ee34f176d7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/aefb9e00909efa991b8f89e18e40ebc9e14f812ef0f371b6668cba7d6f813151.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:852e603cba241d72bced7d8b8ae26071ebf1b253d07384932af0dba4b2b1917a +size 125112 diff --git a/data/2025/2504_09xxx/2504.09153/images/b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg b/data/2025/2504_09xxx/2504.09153/images/b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5305f720fe3c9d968c4c829b165865c69cc4577b --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09153/images/b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b348a94ced561cdaae26bef4d39ceb5ae8c3caa9d7924a14bc3f0f7334ccfc1c +size 34206 diff --git a/data/2025/2504_09xxx/2504.09153/images/b18000f7aeb2c577b647d747d82e6d530210f92f8540c1ae3fe4804b6814cb6a.jpg b/data/2025/2504_09xxx/2504.09153/images/b18000f7aeb2c577b647d747d82e6d530210f92f8540c1ae3fe4804b6814cb6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32769bab5edad59508c93465a2931068f74f0e38 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/b18000f7aeb2c577b647d747d82e6d530210f92f8540c1ae3fe4804b6814cb6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec59b6e38252d77edc32ebdf565221fac71210c3c3d45a095318f42e5d33b070 +size 158075 diff --git a/data/2025/2504_09xxx/2504.09153/images/b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg b/data/2025/2504_09xxx/2504.09153/images/b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1db06d6d8462b2dd01655b426559777230a97b5b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdd54ef15a5fb0d7c33e6adf7306aea5e4a362023001a25d2a1c84adfa7cdee6 +size 41388 diff --git a/data/2025/2504_09xxx/2504.09153/images/bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg b/data/2025/2504_09xxx/2504.09153/images/bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..232704cb6f689a5a35bc0dd0d904622cd2b7d6ac --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9f8e949726482bfc2529f44122049c1cdfe5890e0d0de431e7b184c094c139d6 +size 20785 diff --git a/data/2025/2504_09xxx/2504.09153/images/bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg b/data/2025/2504_09xxx/2504.09153/images/bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d29386aadcee8c8adbd77dea0e82634e56dbb1d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d26437e1d7108203b80d30be3afc725efa4c412d5f75ba915e563357c62a552e +size 180230 diff --git a/data/2025/2504_09xxx/2504.09153/images/c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg b/data/2025/2504_09xxx/2504.09153/images/c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50da796a7fbb76d4e829bd4b006d8f34649b3792 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9912a29f86f7f96cbfe9c7488d2a756fa15bf7be42b784ce8d6dae90ad407277 +size 29117 diff --git a/data/2025/2504_09xxx/2504.09153/images/c4e8fccf864b7793ee0ff85b396ae3929eca2320ee801b9671f6ac1ac3fdaf87.jpg b/data/2025/2504_09xxx/2504.09153/images/c4e8fccf864b7793ee0ff85b396ae3929eca2320ee801b9671f6ac1ac3fdaf87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f424f7a47da3434217fec751d158ebe839ffd4a9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/c4e8fccf864b7793ee0ff85b396ae3929eca2320ee801b9671f6ac1ac3fdaf87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c694b156b6583e2f5e20d54e0cab1541c509d89fa902ce6ff7cdb311e464d42 +size 246700 diff --git 
a/data/2025/2504_09xxx/2504.09153/images/d546a765ef9d9111da613533a636f1121d9e0b344a8fa8ca2f8b74b4bc8c977f.jpg b/data/2025/2504_09xxx/2504.09153/images/d546a765ef9d9111da613533a636f1121d9e0b344a8fa8ca2f8b74b4bc8c977f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..38dacee8f8bd59554982b42e75d62384b4ed3fde --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/d546a765ef9d9111da613533a636f1121d9e0b344a8fa8ca2f8b74b4bc8c977f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:208b47d46c31f3b1adf8b580da1bc92891f27c1d52c5555e6874303c42e0b920 +size 199861 diff --git a/data/2025/2504_09xxx/2504.09153/images/ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg b/data/2025/2504_09xxx/2504.09153/images/ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b08247ab85832cb4ee96bbf68ba3f5d6c7a129d1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59d825902f9fca86a23bd6c5a45a8353797fc68bb64933e772f9da5c2a060145 +size 28852 diff --git a/data/2025/2504_09xxx/2504.09153/images/f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg b/data/2025/2504_09xxx/2504.09153/images/f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..992f7863b824fece3fed786dc913147f262f9645 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0737c13619ef3cca9152c16744c1abed97343dca09d8411f583f078f1ec84381 +size 41367 diff --git a/data/2025/2504_09xxx/2504.09153/images/fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg 
b/data/2025/2504_09xxx/2504.09153/images/fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfee3b82077f74450058ca967034a0921f0f7fd6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85fc952e2bf4da4e0328ac8db50691365f3f0078b62ecd1efed5b38b6528f98 +size 25568 diff --git a/data/2025/2504_09xxx/2504.09153/images/fce984a856adf58da26013e00ddd120d464b283587d49890dccfd1c7c26d073d.jpg b/data/2025/2504_09xxx/2504.09153/images/fce984a856adf58da26013e00ddd120d464b283587d49890dccfd1c7c26d073d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b01ed3be5fab322fb05a5662a43b9fda66ef4eac --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/images/fce984a856adf58da26013e00ddd120d464b283587d49890dccfd1c7c26d073d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2323478757b08bfacbef73cd91ef19c52c7ad5fec11db7778da767295cedd1a7 +size 169812 diff --git a/data/2025/2504_09xxx/2504.09153/layout.json b/data/2025/2504_09xxx/2504.09153/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..747e0b234ab1a260e36a9f7d6a9430ab46f5ec55 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09153/layout.json @@ -0,0 +1,19156 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 73, + 56, + 536, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 56, + 536, + 111 + ], + "spans": [ + { + "bbox": [ + 73, + 56, + 536, + 111 + ], + "type": "text", + "content": "Secure Physical Layer Communications for Low-Altitude Economy Networking: A Survey" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 117, + 555, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 117, + 555, + 145 + ], + "spans": [ + { + "bbox": [ + 55, + 117, + 555, + 145 + 
], + "type": "text", + "content": "Lingyi Cai, Jiacheng Wang, Ruichen Zhang, Yu Zhang, Tao Jiang, Fellow, IEEE, Dusit Niyato, Fellow, IEEE, Xianbin Wang, Fellow, IEEE, Abbas Jamalipour, Fellow, IEEE, and Xuemin Shen, Fellow, IEEE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 186, + 301, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 301, + 437 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 301, + 437 + ], + "type": "text", + "content": "Abstract—The Low-Altitude Economy Networking (LAENet) is emerging as a transformative paradigm that enables an integrated and sophisticated communication infrastructure to support aerial vehicles in carrying out a wide range of economic activities within low-altitude airspace. However, the physical layer communications in the LAENet face growing security threats due to inherent characteristics of aerial communication environments, such as signal broadcast nature and channel openness. These challenges highlight the urgent need for safeguarding communication confidentiality, availability, and integrity. In view of the above, this survey comprehensively reviews existing secure countermeasures for physical layer communication in the LAENet. We explore core methods focusing on anti-eavesdropping and authentication for ensuring communication confidentiality. Subsequently, availability-enhancing techniques are thoroughly discussed for anti-jamming and spoofing defense. Then, we review approaches for safeguarding integrity through anomaly detection and injection protection. Furthermore, we discuss future research directions, emphasizing energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. 
This survey may provide valuable references and new insights for researchers in the field of secure physical layer communication for the LAENet." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 441, + 301, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 441, + 301, + 474 + ], + "spans": [ + { + "bbox": [ + 45, + 441, + 301, + 474 + ], + "type": "text", + "content": "Index Terms—Low-altitude economy networking, secure physical layer communications, communication confidentiality, communication availability, communication integrity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 487, + 215, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 487, + 215, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 487, + 215, + 498 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 502, + 301, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 502, + 301, + 551 + ], + "spans": [ + { + "bbox": [ + 45, + 502, + 301, + 551 + ], + "type": "text", + "content": "WITH the rapid development of aerial vehicle technologies and communication networks, the concept of Low-Altitude Economic Networking (LAENet) has emerged to enable more comprehensive, large-scale, and intelligent" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 558, + 301, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 558, + 301, + 604 + ], + "spans": [ + { + "bbox": [ + 45, + 558, + 301, + 604 + ], + "type": "text", + "content": "Lingyi Cai is with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China, and also with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mail: lingyicai@hust.edu.cn)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 603, + 301, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 603, + 301, + 640 + ], + "spans": [ + { + "bbox": [ + 45, + 603, + 301, + 640 + ], + "type": "text", + "content": "Jiacheng Wang, Ruichen Zhang, and Dusit Niyato are with the College of Computing and Data Science, Nanyang Technological University, Singapore (e-mails: jiacheng.wang@ntu.edu.sg; ruichen.zhang@ntu.edu.sg; dniyato@ntu.edu.sg)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 639, + 301, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 639, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 45, + 639, + 301, + 677 + ], + "type": "text", + "content": "Yu Zhang and Tao Jiang are with the Research Center of 6G Mobile Communications, School of Cyber Science and Engineering, Huazhong University of Science and Technology, Wuhan, 430074, China (e-mail: yuzhang123@hust.edu.cn; tao.jiang@ieee.org)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 675, + 301, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 675, + 301, + 703 + ], + "spans": [ + { + "bbox": [ + 45, + 675, + 301, + 703 + ], + "type": "text", + "content": "Xianbin Wang is with the Department of Electrical and Computer Engineering, Western University, London, ON, N6A 5B9, Canada (e-mail: xianbin.wang@uwo.ca)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "spans": [ + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "type": "text", + "content": "Abbas Jamalipour is with the School of Electrical and Computer Engineering, University of Sydney, Australia (e-mail: a.jamalipour@ieee.org)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 720, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 720, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 720, + 301, + 748 + ], + "type": "text", + "content": "Xuemin Shen is with the Department of Electrical and Computer Engineering, University of Waterloo, Waterloo, ON N2L 3G1, Canada (e-mail: sshen@uwaterloo.ca)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 186, + 564, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 186, + 564, + 401 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 564, + 401 + ], + "type": "text", + "content": "connectivity to support various low-altitude activities [1]–[4], such as intelligent transportation, logistics delivery, communication enhancement, disaster monitoring, and emergency response [5]–[8], as shown in Fig. 1. The LAENet is built upon earlier frameworks of single Unmanned Aerial Vehicle (UAV) operation and multi-UAV networks. A single UAV typically maintains a direct link to a ground station or base station, operating with simple control procedures and delivering cost-effective services but with limited range and scalability [9]. The UAV network focuses on formation control and multi-UAV collaboration, enabling broader mission areas and stronger fault tolerance [9]–[11]. Advancing from these foundations, the LAENet integrates various aerial vehicles into a high-density communication network, connecting them not only to ground stations but also to other platforms such as base stations, access points, and even satellites [12], [13]. Thus, the LAENet can enable ubiquitous coverage, high reliability, robust fault tolerance, greater autonomy, and intelligence." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 402, + 564, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 402, + 564, + 675 + ], + "spans": [ + { + "bbox": [ + 308, + 402, + 564, + 675 + ], + "type": "text", + "content": "Specifically, the LAENet refers to an integrated network system that connects various low-altitude flight operations, including general aviation, drones, electric vertical take-off and landing (eVTOL) aircraft, and other aerial platforms, within the designated low-altitude airspace (typically below 1,000 meters, and in some cases extending up to 3,000 meters) [1], [13]. The LAENet serves as a vital bridge between ground-based economies and airspace resources, which will drive technological innovation and unlock substantial social and economic benefits [14], [15]. The Civil Aviation Administration of China estimates that the country's low-altitude market will soar from 500 billion Chinese yuan (about 70 billion US dollars) in 2023 to 1.5 trillion Chinese yuan (about 200 billion US dollars) in 2025 and as much as 3.5 trillion Chinese yuan (about 480 billion US dollars) in 2035 [16]. Currently, research institutions and enterprises across multiple regions in China are continuously advancing and expanding innovative research and commercial applications of UAVs and eVTOLs in low-altitude activities [17]. Meanwhile, in the United States, the Federal Aviation Administration has confirmed its commitment to actively promoting the development of electric air taxis and integrating this type of aircraft into the national airspace [18]." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 677, + 565, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 565, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 565, + 750 + ], + "type": "text", + "content": "In the LAENet, physical layer communication serves as a critical foundation for wireless communication between aerial vehicles and between aerial vehicles and communication infrastructure [10], [28], [31]. The physical layer converts digital data from higher protocol layers into signals suitable for transmission over aerial communication channels [32]–[34]." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 219, + 36, + 571 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 36, + 571 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 36, + 571 + ], + "type": "text", + "content": "arXiv:2504.09153v1 [cs.CR] 12 Apr 2025" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 53, + 563, + 322 + ], + "blocks": [ + { + "bbox": [ + 50, + 53, + 563, + 322 + ], + "lines": [ + { + "bbox": [ + 50, + 53, + 563, + 322 + ], + "spans": [ + { + "bbox": [ + 50, + 53, + 563, + 322 + ], + "type": "image", + "image_path": "bd4f13f7a34b1eba31da2c93966f6b7ca47565af3508ca10543f719898a1c39b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 331, + 565, + 361 + ], + "lines": [ + { + "bbox": [ + 45, + 331, + 565, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 331, + 565, + 361 + ], + "type": "text", + "content": "Fig. 1. 
The overall architecture of the LAENet covers the main application scenarios, including emergency monitoring and response, temporary communication relay, communication coverage expansion, low-altitude smart logistics, and urban air mobility. The table compares the similarities and differences between the LAENet, single UAV, and UAV networks, representing the evolution of the LAENet." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 380, + 301, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 380, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 45, + 380, + 301, + 548 + ], + "type": "text", + "content": "This process encompasses encoding data into bit sequences, modulating them onto carrier waves, and ensuring reliable signal propagation through the wireless medium [32], [35], [36]. At the receiver side, the physical layer performs inverse operations, including demodulating the incoming signals, decoding the bit sequences, and passing the data to upper layers for further processing [37]–[39]. Therefore, the physical layer supports the core communication mechanisms in the LAENet and plays a crucial role in its aerial deployment. For example, aerial vehicles deployed as aerial base stations (ABSs) or aerial relays can overcome interference, signal distortion, and environmental variations inherent in communication links by using physical layer functionalities such as channel access, multiplexing, and channel equalization [33], [40], [41]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 556, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 556, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 45, + 556, + 301, + 748 + ], + "type": "text", + "content": "However, physical layer communication in the LAENet is exposed to a variety of security threats due to the inherent characteristics of aerial communication environments [42]. The broadcast nature of wireless signals and the prevalence of line-of-sight (LoS) propagation make aerial links particularly vulnerable to eavesdropping, jamming, and spoofing attacks [1], [43]. These attacks can compromise communication confidentiality, disrupt communication, or deceive aerial vehicles by impersonating legitimate transmitters [44], [45]. Furthermore, the openness of wireless channels and weak authentication mechanisms increase the risk of unauthorized access and injection attacks, allowing adversaries to infiltrate the network or inject malicious signals [46], [47]. Additionally, the open medium and dynamic spectrum access may cause anomalous behaviors to disrupt normal communication operations in the LAENet [48], [49]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 380, + 564, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 564, + 463 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 564, + 463 + ], + "type": "text", + "content": "Confronted with these substantial security challenges, this paper conducts a comprehensive analysis on physical layer communications of the LAENet and provides a thorough survey of technologies and solutions to address communication confidentiality, availability, and integrity. Table II gives a clear structure for showing existing efforts on secure physical layer communications for the LAENet." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 309, + 481, + 391, + 494 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 481, + 391, + 494 + ], + "spans": [ + { + "bbox": [ + 309, + 481, + 391, + 494 + ], + "type": "text", + "content": "A. Related Surveys" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 497, + 565, + 750 + ], + "type": "text", + "content": "Recently, a number of excellent survey and tutorial papers have overviewed security issues in UAV networks and communications and have summarized corresponding countermeasures and solutions, as shown in Table I. Some works consider security issues at the system level including intrusion, privacy, and trust issues. The work in [19] provides a comprehensive review of security threats facing UAVs and UAV networks, including communication vulnerabilities, sensor spoofing, jamming, and malware attacks. It examines various countermeasures such as encryption, global positioning system (GPS) spoofing mitigation, and firmware signing. A gap analysis is performed to identify remaining security vulnerabilities and provide recommendations for future UAV development. The study in [20] conducts a comprehensive review of security issues in UAV swarm networks, examining various potential attacks such as communication attacks, identity-based attacks, resource attacks, routing attacks, data attacks, and machine learning (ML) attacks. 
It categorizes these threats and presents corresponding security technologies and countermeasures, including cryptography, physical layer security techniques, blockchain, machine learning, and intrusion detection" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 49, + 82, + 298, + 421 + ], + "blocks": [ + { + "bbox": [ + 104, + 57, + 242, + 74 + ], + "lines": [ + { + "bbox": [ + 104, + 57, + 242, + 74 + ], + "spans": [ + { + "bbox": [ + 104, + 57, + 242, + 74 + ], + "type": "text", + "content": "TABLEI SUMMARY OF RELATED SURVEYS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 49, + 82, + 298, + 421 + ], + "lines": [ + { + "bbox": [ + 49, + 82, + 298, + 421 + ], + "spans": [ + { + "bbox": [ + 49, + 82, + 298, + 421 + ], + "type": "table", + "html": "
ReferencesFocus
[19]A review of cybersecurity threats, countermeasures, and research gaps in UAV networks, with a focus on emerging attack surfaces and commercial UAV applications
[20]A survey of security threats, vulnerabilities, and countermeasures in UAV swarm networks, with a focus on classifying attack types and reviewing emerging defense technologies
[21]A review of security threats, vulnerabilities, and countermeasures in UAVs and Flying Ad Hoc Networks with attack surface analysis with simulation-based evaluation
[22]A survey of vulnerabilities across software, hardware, and communication layers in UAV systems, and an exploration of emerging defense technologies
[23]A survey of security challenges in drone communication and a review of emerging technologies used to enhance the speed, reliability, and security of UAV networks
[24]A review of UAV security challenges, existing controls, and future research directions, with an emphasis on the transformative role of AI in enabling secure UAV systems
[25]A review of security threats classified from a cyberspace security perspective and countermeasures in UAV systems
[26]A survey of security threats, requirements, and counter-measures in UAV-aided Internet of Things (IoT) applications
[27]A survey of cybersecurity vulnerabilities and countermeasures in UAV systems, integrating threat classification, communication protocols, and emerging techniques
[28]A survey of PLS in UAV communications, focusing on key challenges, methodologies, and recent advancements for both static and mobile UAV deployment scenarios
[29]A review of security challenges, practical deployment aspects, and standardization progress associated with integrating UAVs into cellular networks
[30]A survey of layer-wise cybersecurity threats and AI-enabled countermeasures in UAV-assisted IoT applications
", + "image_path": "34be38f581f31f563210090b46c7023435f7f21295c66f10111880e33a75aa87.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 447, + 301, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 447, + 301, + 567 + ], + "spans": [ + { + "bbox": [ + 45, + 447, + 301, + 567 + ], + "type": "text", + "content": "systems. The authors in [21] provide a detailed examination of security challenges in UAVs and FANETs, covering various attack vectors including communication, identity-based, resource, routing, data, and machine learning attacks. The study in [22] examines security and privacy vulnerabilities in UAV systems across hardware, software, and communication layers. It discusses various threats such as eavesdropping and jamming attacks, and presents defense mechanisms including blockchain, machine learning-based intrusion detection, and secure communication protocols." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 569, + 301, + 750 + ], + "type": "text", + "content": "Some studies emphasize cyber security challenges within UAV networks. The study in [23] comprehensively reviews security issues in drone communication, including Denial of Service (DoS), GPS spoofing, and man-in-the-middle attacks. It examines vulnerabilities across different drone applications and presents countermeasures using blockchain, software-defined networks, machine learning, and fog computing. The authors of [24] provide a comprehensive survey of security challenges in UAV systems, including various types of attacks, privacy concerns, and trust issues. It identifies current research trends and gaps while establishing a future roadmap with a focus on artificial intelligence (AI)'s potential to enhance UAV security. 
The authors in [25] provide a comprehensive review of security issues in UAV networks, examining various potential attacks such as spoofing, replay, jamming, and" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 419, + 57, + 454, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 57, + 454, + 65 + ], + "spans": [ + { + "bbox": [ + 419, + 57, + 454, + 65 + ], + "type": "text", + "content": "TABLE II" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 383, + 67, + 490, + 74 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 67, + 490, + 74 + ], + "spans": [ + { + "bbox": [ + 383, + 67, + 490, + 74 + ], + "type": "text", + "content": "CHALLENGES AND SOLUTIONS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 322, + 76, + 552, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 76, + 552, + 110 + ], + "spans": [ + { + "bbox": [ + 322, + 76, + 552, + 110 + ], + "type": "text", + "content": "RED CIRCLES DESCRIBE THE SECURITY ISSUES; GREEN CIRCLES REPRESENT THE OVERALL COUNTERMEASURES FOR THE SECURITY ISSUES; GREEN CHECK MARKERS INDICATE DIFFERENT TYPES OF SOLUTIONS UNDER EACH COUNTERMEASURE" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 310, + 118, + 563, + 392 + ], + "blocks": [ + { + "bbox": [ + 310, + 118, + 563, + 392 + ], + "lines": [ + { + "bbox": [ + 310, + 118, + 563, + 392 + ], + "spans": [ + { + "bbox": [ + 310, + 118, + 563, + 392 + ], + "type": "table", + "html": "
Section III, Challenge 1: Communication confidentiality
Issues● Eavesdropping attack [46], [50]● Unauthorized access [31], [51], [52]
Solutions● Anti-eavesdropping strategies√ Convex optimization-based strategies [53]–[59]√ Reinforcement learning-based strategies [60]–[65]√ Deep learning-based strategies [66]–[71]● Communication authentication√ PUFs-based authentication [72]–[74]√ Channel based-authentication [75]–[77]
Section IV, Challenge 2: Communication availability
Issues● Jamming attack [48], [78], [79]● Spoofing attack [49], [50], [52], [78]
Solutions● Anti-jamming strategies√ Convex optimization [80]–[82]√ Single-agent RL [83]–[86]√ Multi-agent RL [87]–[89]● Spoofing defense√ PLA [77], [90], [91]√ GNSS spoofing detection [92]–[94]
Section V, Challenge 3: Communication Integrity
Issues● Anomalous behaviors [61], [95], [96]● Injection attacks [28], [46], [97]
Solutions● Anomaly detection√ Jamming anomaly detection [98]–[101]√ Abnormal power detection [102]√ Eavesdropping anomaly detection [103]● Injection defense√ Jamming signal injection defense [98], [101], [104]● Spoofing signal injection defense [105]–[107]
", + "image_path": "59767b138893846c4828b5215fcedab577ef94ef050884296e36a852a110260c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 431, + 564, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 431, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 307, + 431, + 564, + 515 + ], + "type": "text", + "content": "eavesdropping attacks. It categorizes these threats and presents corresponding security technologies and countermeasures. The study in [26] provides a comprehensive review of security issues in UAV-aided IoT applications and presents corresponding security technologies and countermeasures. The work in [27] reviews cybersecurity threats affecting UAV systems and evaluates existing countermeasures in enhancing UAV security." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 521, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 521, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 521, + 564, + 749 + ], + "type": "text", + "content": "In addition, some surveys analyze the challenges faced by UAV systems from a layered perspective (e.g., physical layer, link layer, network layer, application layer). The work in [28] deeply reviews the current state of physical layer security (PLS) in UAV communications, examining unique air-to-ground channel characteristics, static and mobile UAV deployment scenarios, and various security enhancement techniques. The work in [29] presents a comprehensive overview of UAV cellular communications, covering the classification of consumer drones, the concept and potential of UAV-mounted flying base stations. It explores the integration of UAVs into cellular networks as novel user equipment and addresses key challenges related to interference, regulatory compliance, and security. 
The authors of [30] review the cybersecurity landscape of UAV-assisted IoT applications, examining layer-wise security threats from physical to application layers. It explores how AI, ML, deep learning (DL), and reinforcement learning (RL) techniques have been employed to address authentication, data privacy, and attack prevention challenges." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 82, + 500, + 331 + ], + "blocks": [ + { + "bbox": [ + 286, + 57, + 324, + 64 + ], + "lines": [ + { + "bbox": [ + 286, + 57, + 324, + 64 + ], + "spans": [ + { + "bbox": [ + 286, + 57, + 324, + 64 + ], + "type": "text", + "content": "TABLE III" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 253, + 65, + 357, + 75 + ], + "lines": [ + { + "bbox": [ + 253, + 65, + 357, + 75 + ], + "spans": [ + { + "bbox": [ + 253, + 65, + 357, + 75 + ], + "type": "text", + "content": "LIST OF ABBREVIATIONS" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 82, + 500, + 331 + ], + "lines": [ + { + "bbox": [ + 111, + 82, + 500, + 331 + ], + "spans": [ + { + "bbox": [ + 111, + 82, + 500, + 331 + ], + "type": "table", + "html": "
AbbreviationDescriptionAbbreviationDescription
A2GAir-to-groundABSAerial Base Station
ANArtificial NoiseAIArtificial Intelligence
BCDBlock Coordinate DescentBSBase Station
CNNConvolutional Neural NetworkCSIChannel State Information
DDPGDeep Deterministic Policy GradientDDQNDouble-deep Q-Learning
DLDeep LearningDNNDeep Neural Network
DQNDeep Q-NetworkeVTOLElectric Vertical Take-off and Landing
DRLDeep Reinforcement LearningFARFalse Alarm Rate
G2AGround-to-airG2UGround-to-UAV
GANGenerative Adversarial NetworkGNSSGlobal Navigation Satellite System
GPSGlobal Positioning SystemGSGround Station
IoTInternet of ThingsLAENetLow-Altitude Economy Networking
LSTMLong Short-Term MemoryLoSLine-of-sight
MARLMulti-agent Reinforcement LearningMDPMarkov Decision Process
MDRMiss Detection RateMECMobile Edge Computing
MLMachine LearningMSEMean Square Error
NOMANon-orthogonal Multiple AccessPLAPhysical-layer Authentication
PLSPhysical Layer SecurityPUFPhysical Unclonable Function
QoEQuality of ExperienceRFRadio Frequency
RISReconfigurable Intelligent SurfacesRLReinforcement Learning
RNNRecurrent Neural NetworkRSSReceived Signal Strength
SCASuccessive Convex ApproximationSDNRSignal-to-disturbance-plus-noise Ratio
SNRSignal-to-noise RatioSOCSecond-Order Cone
TDMATime-division Multiple AccessTHzTerahertz
U2GUAV-to-ground CommunicationUAVUnmanned Aerial Vehicle
", + "image_path": "d546a765ef9d9111da613533a636f1121d9e0b344a8fa8ca2f8b74b4bc8c977f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 351, + 180, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 351, + 180, + 363 + ], + "spans": [ + { + "bbox": [ + 46, + 351, + 180, + 363 + ], + "type": "text", + "content": "B. Contributions of Our Survey" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 371, + 301, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 371, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 45, + 371, + 301, + 562 + ], + "type": "text", + "content": "The related surveys and tutorials primarily focus on the classification of overall security threats and corresponding countermeasures in UAV networks or UAV-assisted applications, with relatively little attention given to security issues of communication in the physical layer. Different from existing studies, our survey uniquely concentrates on the security challenges specific to physical layer communications in the LAENet, as summarized in Table II. It fills a critical gap in the literature by conducting an in-depth analysis of threats in physical layer communications that were previously underexplored or only briefly mentioned in prior studies. By offering a comprehensive and systematic analysis of these underexplored issues, our work brings new insights to seek effective solutions to enhance physical layer security in communications of the LAENet. 
The key contributions of this paper are summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 563, + 300, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 563, + 300, + 587 + ], + "spans": [ + { + "bbox": [ + 45, + 563, + 300, + 587 + ], + "type": "text", + "content": "The key contributions of this paper are summarized as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 593, + 301, + 750 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 55, + 593, + 299, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 593, + 299, + 700 + ], + "spans": [ + { + "bbox": [ + 55, + 593, + 299, + 700 + ], + "type": "text", + "content": "- A thorough discussion of the six main security issues in the physical layer communication of the LAENet is presented, namely, eavesdropping attack, unauthorized access, jamming attack, spoofing attack, anomalous behaviors, and injection attack. We analyze these attacks in the context of their potential occurrence throughout the entire operation of LAENet, providing essential references for ensuring the security of physical layer communication in the future LAENet deployments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 56, + 700, + 301, + 750 + ], + "type": "text", + "content": "- We review countermeasures against various attacks in detail and offer a comprehensive tutorial on achieving communication confidentiality, communication availability, and communication integrity in LAENet. 
In addition," + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 328, + 351, + 564, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 351, + 564, + 387 + ], + "spans": [ + { + "bbox": [ + 328, + 351, + 564, + 387 + ], + "type": "text", + "content": "the lessons learned for each security issue are presented to emphasize the limitations of existing works and provide high-level insights for improvements." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 319, + 388, + 564, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 388, + 564, + 495 + ], + "spans": [ + { + "bbox": [ + 319, + 388, + 564, + 495 + ], + "type": "text", + "content": "- Several potential future research directions for secure physical layer communication in LAENet are proposed, including energy-efficient physical layer security, multi-drone collaboration for secure communication, AI-driven security defense strategy, space-air-ground integrated security architecture, and 6G-enabled secure UAV communication. These diverse perspectives offer new guidance for future research on secure physical layer communication in LAENet." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 495, + 564, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 495, + 564, + 628 + ], + "spans": [ + { + "bbox": [ + 308, + 495, + 564, + 628 + ], + "type": "text", + "content": "The remainder of this paper is given as follows. Section II introduces the background of the LAENet and security issues in physical layer communication of the LAENet. In Section III, a comprehensive exploration of achieving communication confidentiality for the LAENet is presented. Section IV reviews the solutions for communication availability in the LAENet. In Section V, countermeasures on communication integrity for the LAENet are discussed. 
Section VI provides future research directions, and Section VII concludes this paper. Additionally, Table III lists the abbreviations commonly employed throughout this survey." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 366, + 636, + 506, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 636, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 366, + 636, + 506, + 647 + ], + "type": "text", + "content": "II. BACKGROUND KNOWLEDGE" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 651, + 564, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 651, + 564, + 699 + ], + "spans": [ + { + "bbox": [ + 308, + 651, + 564, + 699 + ], + "type": "text", + "content": "In this section, we introduce the background of the LAENet, including its definition and application scenarios. Subsequently, the concept of physical layer communication in the LAENet and its security threats are elaborated in detail." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 710, + 421, + 721 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 710, + 421, + 721 + ], + "spans": [ + { + "bbox": [ + 309, + 710, + 421, + 721 + ], + "type": "text", + "content": "A. 
Background of LAENet" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 749 + ], + "type": "text", + "content": "The LAENet is a sophisticated and dynamic system that integrates various aerial and terrestrial technologies to en" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 56, + 561, + 339 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 561, + 339 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 561, + 339 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 561, + 339 + ], + "type": "image", + "image_path": "1411e6ebf9ab0f74ca0614dc20359a8ea382871faee714eff22b4eddae00662e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 350, + 566, + 389 + ], + "lines": [ + { + "bbox": [ + 45, + 350, + 566, + 389 + ], + "spans": [ + { + "bbox": [ + 45, + 350, + 566, + 389 + ], + "type": "text", + "content": "Fig. 2. Background knowledge of the LAENet and security issues in its physical layer communication. Describe the definition of the LAENet and its communication application scenarios. Elaborate on three key metrics for secure physical layer communication: communication confidentiality, which combats eavesdropping attacks and unauthorized access; anti-jamming strategies and spoofing defense for ensuring communication availability; and anomaly detection and injection defense to prevent adversaries from compromising communication integrity." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 408, + 301, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 408, + 301, + 552 + ], + "spans": [ + { + "bbox": [ + 45, + 408, + 301, + 552 + ], + "type": "text", + "content": "able seamless communication, coordination, and management of diverse aerial operations within low-altitude airspace [1], [108]. The LAENet includes numerous different types of constituents, such as flight equipment, base stations, and other communication platforms. Specifically, the LAENet connects various aerial vehicles, including general aviation aircraft for passenger transport and emergency rescue, drones for surveillance and logistics, and eVTOL designed for urban air mobility and last-mile cargo delivery [109], [110]. These aerial vehicles can incorporate ground and aerial base stations, further high-altitude platforms, such as weather balloons and satellites, to receive environmental information and precise navigation [13]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 557, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 557, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 557, + 301, + 749 + ], + "type": "text", + "content": "Different from traditional aviation networks that rely on centralized air traffic control, the LAENet can independently construct communication and networking by seamlessly interconnecting a variety of aerial and ground-based systems, which enables continuous information exchange, flight path optimization, and autonomous operations [8], [111]. Therefore, the LAENet has opened opportunities for various application scenarios and plays key roles from the perspective of communication coverage and relay [112]–[114]. 
Specifically, the LAENet can extend the communication coverage by deploying aircraft as ABSs in areas lacking communication infrastructure [115]–[117]. For instance, these ABSs deployed at optimal altitudes can provide connectivity and network services in remote or disaster-stricken areas [118], [119]. Moreover, if the direct communication links between ground base stations and user equipment are unreliable, such as in" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 408, + 563, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 408, + 563, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 408, + 563, + 491 + ], + "type": "text", + "content": "mountainous regions and densely populated areas, the aircraft can act as mobile relays to improve connectivity by capturing, amplifying, and transmitting communication signals [120]–[122]. It also can be regarded as a surveillance unit to monitor airspace dynamics while simultaneously functioning as a low-altitude network orchestrator to optimize communication and computing resources [118], [123], [124]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 493, + 564, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 493, + 564, + 661 + ], + "spans": [ + { + "bbox": [ + 307, + 493, + 564, + 661 + ], + "type": "text", + "content": "To integrate and evolve these capabilities, the LAENet needs to establish effective communication infrastructure to ensure reliable connectivity and efficient interaction across various environments [31], [125]. Physical layer communication, as the bottom layer in the network architecture, may directly influence the communication performance of the LAENet across aerial and terrestrial networks [43], [46]. For example, it governs how signals are generated, transmitted, and received between aircraft and base stations [31]. 
Building on this, it manages the channel and spectrum resources to enhance signal transmission quality and maintain stable connectivity [43]. Therefore, ensuring the security of physical layer communication in the LAENet is crucial for supporting a wide range of applications in low-altitude domains." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 683, + 565, + 705 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 683, + 565, + 705 + ], + "spans": [ + { + "bbox": [ + 308, + 683, + 565, + 705 + ], + "type": "text", + "content": "B. Security Issues in Physical Layer Communication of LAENet" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "text", + "content": "Based on previous studies [126], [127], we discuss the security issues in the physical layer communication of the LAENet from three aspects: confidentiality, availability, and" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 55, + 299, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 55, + 299, + 77 + ], + "spans": [ + { + "bbox": [ + 48, + 55, + 299, + 77 + ], + "type": "text", + "content": "integrity of communications. The details of each measurement are described as follows." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 102, + 299, + 604 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 58, + 102, + 299, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 102, + 299, + 257 + ], + "spans": [ + { + "bbox": [ + 58, + 102, + 299, + 257 + ], + "type": "text", + "content": "- The confidentiality of physical layer communications in the LAENet can be compromised by security threats such as eavesdropping and unauthorized access [128]. Eavesdropping arises primarily from the broadcast nature of wireless signals and LoS link, making transmissions highly susceptible to interception [46]. An eavesdropper silently capturing or intercepting signals can lead to the exposure of confidential information. Meanwhile, unauthorized access threats exploit the open and broadcast nature of UAV communications [31]. Attackers may gain illegal access to the LAENet by disguising themselves as legitimate UAVs or ground stations, thereby deceiving or interfering with the normal operation of UAVs [51]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 58, + 258, + 299, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 258, + 299, + 399 + ], + "spans": [ + { + "bbox": [ + 58, + 258, + 299, + 399 + ], + "type": "text", + "content": "- Similarly, the open nature of wireless channels and LoS propagation bring jamming and spoofing security issues for communication availability [78]. Specifically, jammers can continuously transmit interference signals to disrupt communication, where a jammer can be a drone or a base station [48]. The spoofing attack can not only achieve identity spoofing by forging legitimate transmission identities but also launch signal deception attacks to disrupt UAV communications and positioning [49]. Therefore, jamming and spoofing lead to unauthorized access and signal disruptions or errors, making communication unavailable in the LAENet." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 402, + 299, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 402, + 299, + 604 + ], + "spans": [ + { + "bbox": [ + 58, + 402, + 299, + 604 + ], + "type": "text", + "content": "- Integrity as a microscopic metric measures the deviations of signals, channels, and spectrum in communication under adversaries' influence [129]. The communication integrity of the LAENet can be affected by anomalous behaviors and injection attacks. Anomalous behaviors often use dynamic spectrum access and the open wireless medium, including abnormal jamming, abnormal transmission power, and covert eavesdropping [95]. These anomalous behaviors can introduce harmful interference, violate spectrum policies, and expose sensitive information to eavesdroppers [61], [96]. Moreover, the injection attack exploits the open nature of wireless channels to alter signals or inject illegal signals, such as spoofing signals or malicious GNSS signals, to deceive receivers and interfere with communication, thereby leading to degraded signal quality, false navigation, and network congestion [28], [46], [97]." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 48, + 628, + 299, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 628, + 299, + 747 + ], + "spans": [ + { + "bbox": [ + 48, + 628, + 299, + 747 + ], + "type": "text", + "content": "Overall, as illustrated in Fig. 2, this survey reviews existing research on achieving communication confidentiality, availability, and integrity for the LAENet. Specifically, the investigation of anti-jamming strategies and communication authentication schemes aims to enhance communication confidentiality. Studies on anti-jamming techniques and spoofing defense mechanisms have been explored to ensure communication availability. 
Furthermore, research on communication integrity has focused on anomaly detection and injection attack mitigation approaches." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 55, + 556, + 65 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 55, + 556, + 65 + ], + "spans": [ + { + "bbox": [ + 317, + 55, + 556, + 65 + ], + "type": "text", + "content": "III. COMMUNICATION CONFIDENTIALITY FOR LAENET" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 73, + 440, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 73, + 440, + 84 + ], + "spans": [ + { + "bbox": [ + 310, + 73, + 440, + 84 + ], + "type": "text", + "content": "A. Anti-eavesdropping Strategy" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 89, + 563, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 563, + 268 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 563, + 268 + ], + "type": "text", + "content": "The LAENet faces significant eavesdropping threats due to the inherent vulnerabilities of UAV-enabled wireless communications. The openness of wireless channels, especially the LoS links in air-to-ground (A2G) and ground-to-air (G2A) communications, increases susceptibility to interception by eavesdroppers that disrupt legitimate communications compared to traditional terrestrial channels [50]. Traditional cryptographic methods, while effective in many scenarios, are less suitable for UAV communications due to their computational complexity and the dynamic mobility of UAVs [130]. This highlights the critical need for robust security measures to ensure the confidentiality and reliability of the LAENet communications. To address these limitations, leveraging PLS techniques to counter eavesdropping threats effectively has emerged as a promising solution [131]-[134]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 310, + 269, + 563, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 269, + 563, + 448 + ], + "spans": [ + { + "bbox": [ + 310, + 269, + 563, + 448 + ], + "type": "text", + "content": "In the LAENet, anti-eavesdropping solutions can leverage the controllable mobility of low-altitude aircraft to enhance physical layer security. By dynamically optimizing their trajectories, low-altitude aircraft can actively adapt their flight paths to shape the communication environment [135]. This approach allows them to fly closer to legitimate ground nodes, strengthening communication links and improving channel conditions for intended receivers, while simultaneously distancing themselves from potential eavesdroppers. In this subsection, we present a critical role of UAV trajectory in forming the communication environment, and how PLS can be enhanced through trajectory optimization and resource allocation to mitigate eavesdropping risks. Our analysis focuses on three prominent methodologies in this domain: convex optimization, deep learning, and reinforcement learning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 449, + 563, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 449, + 563, + 628 + ], + "spans": [ + { + "bbox": [ + 310, + 449, + 563, + 628 + ], + "type": "text", + "content": "Convex optimization plays a crucial role in addressing anti-eavesdropping challenges in UAV-enabled communication networks, particularly for solving the joint optimization of trajectory and resource allocation [137]. Due to the inherent non-convex nature of these problems, advanced convex optimization techniques such as Successive Convex Approximation (SCA) and Block Coordinate Descent (BCD) are widely utilized [135]. 
These methods enable UAVs to enhance physical layer security by optimizing flight paths and resource utilization, minimizing the risk of eavesdropping while ensuring secure and efficient communication. Additionally, the decision variables may be discrete, which requires the application of various relaxation methods to transform the complex optimization problem into a more tractable form to obtain efficient solutions [138]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 628, + 563, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 628, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 628, + 563, + 747 + ], + "type": "text", + "content": "The study in [53] explores physical-layer security in UAV-assisted Mobile Edge Computing (MEC) systems in the presence of multiple ground-based eavesdroppers. The proposed system utilizes dual UAVs for task execution and anti-eavesdropping measures. One UAV operates as a mobile MEC server, while the other emits jamming signals to disrupt eavesdroppers, as shown in Fig. 3. 
The time-division multiple access (TDMA) scheme and non-orthogonal multiple access (NOMA) scheme are proposed to maximize the minimum secure computing capacity by jointly optimizing communica" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 92, + 563, + 357 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "type": "text", + "content": "TABLE IV SUMMARY OF CONVEX OPTIMIZATION FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 92, + 563, + 357 + ], + "lines": [ + { + "bbox": [ + 47, + 92, + 563, + 357 + ], + "spans": [ + { + "bbox": [ + 47, + 92, + 563, + 357 + ], + "type": "table", + "html": "
RefOptimization ObjectivesEavesdropper and Jammer TypeOptimizationConstraintsPros & Cons
[53]Secure calculation capacity1UAV jammer and fixed ground eavesdropperTransmit power, time allocation, and computation capacityBCD and P-BCD for secure calculation capacity maximization\n✓ Secure capacity of NOMA and TDMA has been significantly improved\nX High complexity for NOMA due to dual-loop iterations
[54]Secure calculation capacityBase station jammer and fixed ground eavesdropperTransmission power, time allocation, and CPU processing frequencyJDPB algorithm with SCA and BCD for secure task offloading\n✓ Reduce complexity via region division\nX Fixed UAV altitude limits 3D trajectory optimization
[55]Average secrecy rate2Antenna jammer and fixed aerial eavesdropperTransmit power and jamming powerBCD and SCA optimization with hybrid FSO/RF links\n✓ Enhance communication security via hybrid FSO/RF links and AN\nX Rely on simplified channel models (e.g., free-space path loss)
[56]Worst-case secrecy rateUAV jammer and fixed ground eavesdropperUAV speed, collision avoidance, positioning error, and energy harvestingRobust 3D trajectory and time switching optimization\n✓ Full mobility of UAVs in 3D for improving secrecy rate\nX The performance may degrade with flying eavesdroppers
[57]Average secrecy rateNone and flying eavesdropperTransmit power control and user schedulingJoint trajectory and communication design against mobile eavesdroppers\n✓ Initial trajectory design for keeping away from eavesdroppers\nX Security performance relies on the initial trajectory design
[58]Secure calculation capacityGround jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceIntegrate a dual-UAV system with a ground jammer in MEC\n✓ Incorporate the UAV server and UAV eavesdropper with a ground jammer\n✓ Allow a UAV server to hover near ground users for secure offloading\nX Numerous flight constraints may require extensive tuning
[59]Secrecy rateCoastal jammer and flying eavesdropperTransmit power, time slot, computation capacity, UAV speed, and collision avoidanceA secure communication for UAV-relay-assisted maritime MEC\n✓ Simultaneously optimize multiple parameters for improved secrecy rate\nX Iterative decomposition increases the computational burden\nX Assume prior knowledge of Channel State Information (CSI) of devices
", + "image_path": "c4e8fccf864b7793ee0ff85b396ae3929eca2320ee801b9671f6ac1ac3fdaf87.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 48, + 357, + 422, + 366 + ], + "lines": [ + { + "bbox": [ + 48, + 357, + 422, + 366 + ], + "spans": [ + { + "bbox": [ + 48, + 357, + 422, + 366 + ], + "type": "text", + "content": "1Secure calculation capacity is defined as the average number of secure calculation bits in UAV flying time [54]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 47, + 365, + 518, + 376 + ], + "lines": [ + { + "bbox": [ + 47, + 365, + 518, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 518, + 376 + ], + "type": "text", + "content": "2Secrecy rate is defined as the difference between the achievable rate of legitimate UAV's channel and the rate of eavesdropper channel [136]." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "spans": [ + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "text", + "content": "tion resources, computation resources, and UAV trajectories. To address the non-convexity of the optimization problem, the problem is transformed into tractable forms via auxiliary variables and decomposition. Specifically, for the TDMA scheme, the problem is decoupled into two sub-problems using BCD. The communication and computation resources are optimized via second-order cone (SOC) constraints and SCA, while UAV trajectories are iteratively updated via first-order Taylor approximations to handle non-convex terms. For the NOMA scheme, a penalized BCD (P-BCD) algorithm is proposed to tackle binary constraints. 
The problem is split into three blocks that are penalty parameter adjustment, resource allocation via SOC and SCA, and trajectory optimization with convex relaxations. The experimental results demonstrate that the proposed algorithms significantly enhance secure computing capacity, with the NOMA scheme achieving up to about 4.3 Mbps and the TDMA scheme reaching about 4.2 Mbps under optimal conditions. Compared to baselines including the straight flight design and no power control, the proposed strategies improve secure computing capacity by about " + }, + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 45, + 396, + 301, + 696 + ], + "type": "text", + "content": ", particularly in scenarios with lower power budgets (e.g., 0.2 W) and higher required computing bits (e.g., 1 Mbps). The convergence of the algorithms is achieved within 20 iterations, which indicates the efficiency in optimizing UAV trajectories and resource allocation for anti-eavesdropping." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "The study in [53] mainly focuses on a dual-UAV-assisted secure MEC system. In some cases, multi-UAV systems hold great promise for collaboratively executing complex tasks while enhancing the secure communications [49], [54]. 
In the" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 396, + 564, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 396, + 564, + 731 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 564, + 731 + ], + "type": "text", + "content": "work [54], the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems is studied. Firstly, a base station emits jamming signals to protect against fixed-location ground eavesdroppers. Then, it investigated the joint optimization of task offloading, trajectory planning, and resource allocation for secure communications in multi-UAV MEC systems. The problem is decomposed into two sub-problems: (1) resource allocation and trajectory planning, addressed via SCA and BCD algorithms; (2) offloading decisions, solved through Joint Dynamic Programming and Bidding (JDPB) method. For the first sub-problem, non-convex constraints related to transmission power and UAV trajectory are transformed into convex forms using first-order Taylor expansion and relaxation techniques. Specifically, the transmission power optimization sub-problem is approximated via SCA, while the trajectory planning sub-problem is iteratively solved by introducing auxiliary variables and convex approximations. For the second sub-problem, a bidding mechanism is integrated with dynamic programming to reduce computational complexity by grouping dynamic users into sub-regions. The experimental results demonstrate that the proposed JDPB algorithm achieves a sum average secure calculation capacity of 10.1 Mbps in the first time slot. Additionally, under different settings of time slot sizes, transmission power, and flying speed, the sum average secure calculation capacity achieved by JDPB consistently outperforms baseline schemes such as the Greedy Strategy and the Random Strategy." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 735, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 735, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 735, + 564, + 749 + ], + "type": "text", + "content": "Unlike the above studies that deal with ground eavesdrop-" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 60, + 295, + 201 + ], + "blocks": [ + { + "bbox": [ + 49, + 60, + 295, + 201 + ], + "lines": [ + { + "bbox": [ + 49, + 60, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 49, + 60, + 295, + 201 + ], + "type": "image", + "image_path": "b0ced301d684649bd66f097e1413eaf4b7a34732744caf3707d8fc50381c1366.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 49, + 219, + 289, + 355 + ], + "blocks": [ + { + "bbox": [ + 49, + 219, + 289, + 355 + ], + "lines": [ + { + "bbox": [ + 49, + 219, + 289, + 355 + ], + "spans": [ + { + "bbox": [ + 49, + 219, + 289, + 355 + ], + "type": "image", + "image_path": "95b7178677e3e6554d2277956eb4b90047bdfe2645c90f9834611a75b83e6307.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 376, + 301, + 452 + ], + "lines": [ + { + "bbox": [ + 45, + 376, + 301, + 452 + ], + "spans": [ + { + "bbox": [ + 45, + 376, + 301, + 452 + ], + "type": "text", + "content": "Fig. 3. The overall architecture of the anti-eavesdropping strategy. Part A illustrates the system model against fixed ground eavesdroppers. 
In this setup, one UAV operates as a mobile server, while another UAV serves as a jammer to emit jamming signals to disrupt the eavesdroppers' interception capabilities. Part B presents the system model for flying eavesdroppers, where one UAV acts as the server, and another UAV functions as a mobile eavesdropper. To mitigate eavesdropping risks, a ground-based jammer actively emits interference signals to secure communications." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 473, + 303, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 473, + 303, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 473, + 303, + 750 + ], + "type": "text", + "content": "pers, the work in [55] targets threats from aerial eavesdroppers and explores secure communication in a hybrid Free Space Optical (FSO) and Radio Frequency (RF) system. The UAV acts as both a relay and a jammer, emitting artificial noise (AN) during RF transmission to confuse a fixed-position aerial eavesdropper. The work introduces a novel perspective on protecting space-air-ground networks from eavesdropping by leveraging FSO for its inherent resistance to interception and jointly optimizing trajectory design and power allocation to maximize the secrecy rate with two transmission schemes. The first scheme is the slot-based scheme for delay-sensitive data. The trajectory sub-problem is convexified using first-order Taylor expansion to approximate elevation angle and channel gain constraints, while the power allocation sub-problem is transformed into a convex form by introducing a lower bound on transmit power to ensure convexity. The second scheme is the period-based scheme for delay-insensitive data, in which the relaxed constraints on sum secrecy rates over the entire flight period are adopted. A similar SCA method [54] is applied to convexly approximate the non-convex terms in the constraints. 
Compared to benchmark schemes without jamming power optimization, both methods achieve approximately 0.4 Mbps higher secrecy rates by integrating AN transmission" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 309, + 55, + 419, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 55, + 419, + 65 + ], + "spans": [ + { + "bbox": [ + 309, + 55, + 419, + 65 + ], + "type": "text", + "content": "and hybrid FSO/RF links." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "spans": [ + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "text", + "content": "It is worth noting that most existing studies consider optimizing UAV trajectories on a 2D plane. However, optimizing UAV 3D trajectories may be more practical [139]. The study in [56] considers the UAV's 3D flight trajectory and imperfect knowledge of eavesdroppers' locations, while formulating an optimization approach to maximize the worst-case secrecy rate under various practical constraints, including maximum UAV speed, UAV collision avoidance, UAV positioning error, and UAV energy harvesting. To address the non-convexity of the optimization problem, the original problem is decomposed into multiple sub-problems using BCD and SCA techniques similar to studies in [54] and [55]. By incorporating the additional degree of freedom in the vertical dimension, the proposed approach improves the ability to avoid fixed eavesdropping zones, outperforming 2D trajectory models in maintaining secure communication links under dynamic conditions. 
Simulation results show that the average secrecy rate of the proposed 3D optimization scheme outperforms that of the fixed-height 2D benchmarks (set at " + }, + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "inline_equation", + "content": "100\\mathrm{m}" + }, + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "text", + "content": ") by over " + }, + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 307, + 67, + 564, + 294 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 294, + 564, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 294, + 564, + 425 + ], + "spans": [ + { + "bbox": [ + 307, + 294, + 564, + 425 + ], + "type": "text", + "content": "Unlike the above studies that focus on fixed ground eavesdroppers, mobile eavesdroppers, such as hostile UAVs, introduce more complex threats due to their ability to maneuver, track, and position for intercept communications [22], [57]. For example, the authors in [57] address the challenges caused by a flying eavesdropper that exploits UAV LOS communication. This work focuses on jointly optimizing the UAV's trajectory, transmit power control, and user scheduling to maximize the minimum average secrecy rate, which enables dynamic adjustments to ensure secure communication even against an mobile eavesdropper." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 426, + 564, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 426, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 307, + 426, + 564, + 700 + ], + "type": "text", + "content": "Compared to the anti-eavesdropping strategies in [57] that rely heavily on accurate trajectory optimization and resource allocation, the studies in [58], [59] propose using a jammer to actively emit jamming signals, effectively reducing the interception capability of flying eavesdroppers during the computational offloading process of relay UAVs, as shown in Fig. 3. Meanwhile, with the support of SCA and BCD methods similar to [56], the joint optimization problem of UAV trajectories, resource allocation (including transmit power, time slot allocation, and computation capacity), and jamming strategies can be solved while ensuring practical constraints such as flight speed and anti-collision requirements. Importantly, compared to systems targeting fixed ground eavesdroppers, the works in [58], [59] enhance secure calculation capacity or secrecy rate by modeling the trajectories of both the relay UAV and the mobile eavesdropper as dynamic variables optimized over discrete time slots. Specifically, simulation results in [58] demonstrate that the secure calculation capacity of the proposed scheme converges to approximately 2.78 Mbps within 4 iterations, which is significantly higher than the baseline strategy (where only the location of the relay UAV, transmit power, and jamming power are optimized) by approximately 1.6 Mbps." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 700, + 565, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 700, + 565, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 700, + 565, + 750 + ], + "type": "text", + "content": "Lesson Learned. 
Convex optimization has emerged as a fundamental tool for developing anti-eavesdropping strategies in UAV-enabled communication systems, particularly for addressing the inherent non-convexity of joint trajectory and" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 71, + 344, + 178 + ], + "blocks": [ + { + "bbox": [ + 51, + 57, + 156, + 66 + ], + "lines": [ + { + "bbox": [ + 51, + 57, + 156, + 66 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 156, + 66 + ], + "type": "text", + "content": "Part A. DDQN-based Scheme" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 71, + 344, + 178 + ], + "lines": [ + { + "bbox": [ + 52, + 71, + 344, + 178 + ], + "spans": [ + { + "bbox": [ + 52, + 71, + 344, + 178 + ], + "type": "image", + "image_path": "45d7c87df94084b85e80ffec3b31f41bc270ce4f3ed9cdb12fa2a0d42e0bd614.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 363, + 73, + 553, + 175 + ], + "blocks": [ + { + "bbox": [ + 358, + 57, + 460, + 66 + ], + "lines": [ + { + "bbox": [ + 358, + 57, + 460, + 66 + ], + "spans": [ + { + "bbox": [ + 358, + 57, + 460, + 66 + ], + "type": "text", + "content": "Part B. 
DDPG-based Scheme" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 363, + 73, + 553, + 175 + ], + "lines": [ + { + "bbox": [ + 363, + 73, + 553, + 175 + ], + "spans": [ + { + "bbox": [ + 363, + 73, + 553, + 175 + ], + "type": "image", + "image_path": "50ec81d8ac805f5b51db78cda3503bc04eb8a6e427ed0449fb7c44c1037e1683.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 53, + 204, + 333, + 306 + ], + "blocks": [ + { + "bbox": [ + 52, + 190, + 182, + 198 + ], + "lines": [ + { + "bbox": [ + 52, + 190, + 182, + 198 + ], + "spans": [ + { + "bbox": [ + 52, + 190, + 182, + 198 + ], + "type": "text", + "content": "Part D. MAPPO-LSTM-based Scheme" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 204, + 333, + 306 + ], + "lines": [ + { + "bbox": [ + 53, + 204, + 333, + 306 + ], + "spans": [ + { + "bbox": [ + 53, + 204, + 333, + 306 + ], + "type": "image", + "image_path": "b348c7b89c665fbdeeb480d0daf269e5c8246d7039aaaa623043b3df6f5fb043.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 364, + 201, + 553, + 304 + ], + "blocks": [ + { + "bbox": [ + 358, + 190, + 471, + 198 + ], + "lines": [ + { + "bbox": [ + 358, + 190, + 471, + 198 + ], + "spans": [ + { + "bbox": [ + 358, + 190, + 471, + 198 + ], + "type": "text", + "content": "Part C. 
MADDPG-based Scheme" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 364, + 201, + 553, + 304 + ], + "lines": [ + { + "bbox": [ + 364, + 201, + 553, + 304 + ], + "spans": [ + { + "bbox": [ + 364, + 201, + 553, + 304 + ], + "type": "image", + "image_path": "ec488ac4d64ffc43f41564e983772de945b631322c5ef73f5df7d5475abec595.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 323, + 564, + 380 + ], + "lines": [ + { + "bbox": [ + 45, + 323, + 564, + 380 + ], + "spans": [ + { + "bbox": [ + 45, + 323, + 564, + 380 + ], + "type": "text", + "content": "Fig. 4. The overall architecture of the RL for anti-eavesdropping. Part A describes the DDQN-based scheme, where the system state is used to generate actions through the DDQN network, followed by action execution and obtaining the next state and reward. An experience replay mechanism is employed to store and randomly sample training data. Part B presents the DDPG-based scheme, where actions are generated through Actor and Critic networks, interacting with the environment to obtain rewards. An experience replay buffer is used to store and sample mini-batches. Part C describes the MADDPG-based scheme, involving multiple UAV agents, each with its own Actor and Critic networks, interacting with the environment and sharing rewards. Part D showcases the MAPPO-LSTM-based scheme, where Actor and Critic networks with LSTM layers process time-series data and train through an experience replay buffer." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 399, + 301, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 399, + 301, + 614 + ], + "spans": [ + { + "bbox": [ + 45, + 399, + 301, + 614 + ], + "type": "text", + "content": "resource allocation problems. 
For fixed eavesdroppers, simpler optimization models with fewer dynamic variables (e.g., 2D trajectory optimization) can achieve secure communication effectively. However, mobile eavesdroppers require more sophisticated formulations, including 3D trajectory optimization and robust constraints to account for uncertainties in eavesdropper positions. Another important insight is the adaptability of convex optimization when combined with complementary methods like artificial noise jamming and resource allocation strategies. By leveraging convex optimization, systems can balance secrecy performance with energy efficiency, ensuring practical applicability in real-world UAV operations. Techniques such as SCA and BCD have proven highly effective in decoupling complex optimization problems into solvable subproblems, allowing iterative refinement toward locally optimal solutions. Overall, convex optimization offers a flexible and mathematically rigorous approach to securing UAV-enabled communication systems for anti-eavesdropping." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 301, + 750 + ], + "type": "text", + "content": "As the number of ground devices increases, along with UAV flight time and the number of optimization variables, the computational complexity of conventional algorithms grows exponentially, leading to infeasibility or suboptimal solutions [140], [141]. Moreover, these methods struggle to adapt to real-time scenarios where UAVs must communicate with mobile users and operate in environments with uncertain or partial information [140], [142]. RL enables UAVs to interact with the environment and autonomously learn optimal policies based on real-time observations [143], as shown in Fig. 4. 
By leveraging Deep RL (DRL), UAVs can efficiently" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 399, + 563, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 563, + 459 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 563, + 459 + ], + "type": "text", + "content": "adapt to changing eavesdropping conditions, optimize secure trajectories, and dynamically allocate resources [144], [145]. This learning-driven approach significantly enhances PLS by ensuring adaptive, scalable, and intelligent anti-eavesdropping strategies in UAV communication networks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "spans": [ + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "content": "The study in [60] proposes a Deep Q-Network (DQN)-based approach to address the challenge of securing UAV-assisted multi-user wireless communications against passive eavesdropping attacks. The UAV trajectory optimization is formulated as a Markov Decision Process (MDP), where the state space includes the UAV's 3D coordinates and the positions of users. The action space consists of discrete movements in the " + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "content": " directions, with each action representing a step change in position. 
The reward function is designed to maximize the legitimate users' rates, defined as the sum of the channel capacities of users served by the UAV. Unlike many prior works that assume perfect knowledge of eavesdropper CSI [53], [59], this study focuses on optimizing legitimate user rates and using the DQN-based approach without requiring full knowledge of the eavesdropping channels. The DQN iteratively optimizes the UAV's trajectory, beamforming matrix, and transmit power allocation, ensuring the UAV dynamically adjusts its position to maximize secrecy capacity. Numerical results show that the secrecy capacity improves with the number of users. The proposed method converges an order of magnitude faster than the Q-learning method and achieves around " + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "inline_equation", + "content": "35\\%" + }, + { + "bbox": [ + 307, + 460, + 564, + 735 + ], + "type": "text", + "content": " higher secrecy capacity than Q-learning after 20,000 episodes." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "However, the DQN method may face the issue of Q-" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 92, + 567, + 301 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "type": "text", + "content": "TABLE V SUMMARY OF RL FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 92, + 567, + 301 + ], + "lines": [ + { + "bbox": [ + 47, + 92, + 567, + 301 + ], + "spans": [ + { + "bbox": [ + 47, + 92, + 567, + 301 + ], + "type": "table", + "html": "
TechniquesReferenceAlgorithmPros & Cons
Value-based RL[60]DQN○ DQN algorithm for UAV trajectory optimization to maximize the secrecy capacity\n✓ Low computational complexity, making it easy to train\n× Q-value overestimation, leading to suboptimal action selection
[61]DDQN○ DDQN-based joint trajectory, time allocation, and offloading optimization\n✓ Accelerated convergence via action space pruning\n✓ Real-time optimization of trajectory and resources\n× DDQN is restricted to discrete action spaces\n× DDQN is not suitable for continuous action control
Policy Gradient-based RL[62]CAA-MADDPG○ Multi-Agent DRL with attention mechanisms (CAA-MADDPG) for secrecy rate maximization\n✓ Handle complex multi-agent with the attention mechanism\n× Assume prior knowledge of eavesdropper locations\n× Assume ground devices are static
[63]DDPG○ DDPG-based RL for enhancing bi-directional UAV communication security\n✓ Support mobile devices and ensure bi-directional secureit\n× Computational overhead increases with device density\n× performance may be sensitive to hyperparameter selection
[64]PPO+DCCN○ Hybrid DCCN and PPO for secrecy rate maximization\n✓ The PPO optimizes the UAV trajectory based on the results from DCCN\n× The performance is sensitive to the choice of clipping factor in PPO
[65]MAPPO+LSTM○ MAPPO for multi-agent cooperative anti-eavesdropping and LSTM-enhanced sequential learning\n✓ The MAPPO+LSTM improves the learning capability of sequential sample data\n× Assume perfect knowledge of CSI may be challenging in real-world scenarios
", + "image_path": "736a80a4e6b5887a9f24b64146c7c6443017316dd655d4bae306fadd65bc5feb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 323, + 301, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 323, + 301, + 646 + ], + "spans": [ + { + "bbox": [ + 45, + 323, + 301, + 646 + ], + "type": "text", + "content": "value overestimation, leading to suboptimal action selection [146]. The authors in [61] propose a double-deep Q-learning (DDQN)-based scheme to jointly optimize the UAV trajectory, time allocation, and offloading decision strategy, aiming to maximize the average secure computing capacity for antieavesdropping in UAV-aided MEC. The system model involves one legitimate UAV server, one illegitimate UAV eavesdropper, one ground jammer, and ground users. The proposed DDQN-based scheme models the optimization problem as an MDP with states, actions, and rewards. The states include the coordinates of the UAVs, while the actions involve offloading decisions, time allocation, and trajectory adjustments. The reward function maximizes secure computing capacity. The DDQN model includes a deep Q-network (QN) and a deep target network (TN) to generate actions and evaluate their values. The parameters of the QN are updated by minimizing the loss function, and the parameters of the TN are periodically updated. The proposed scheme reduces the action space size by deleting illegal actions, such as those that violate time allocation constraints or result in resource waste. Unlike prior works [57], [60] that rely on conventional optimization or DQN with limited consideration of task queues, this approach integrates real-time resource allocation and trajectory optimization while ensuring dynamic constraints. The proposed DDQN scheme converges in 2000 episodes, half the episodes required by DQN (4000 episodes), and achieves a 0.02 Mbits higher average secure computing capacity." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 649, + 301, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 649, + 301, + 722 + ], + "spans": [ + { + "bbox": [ + 45, + 649, + 301, + 722 + ], + "type": "text", + "content": "The value-based RL method (e.g., DQN) mainly focuses on dealing with discrete action spaces that may lead to the loss of optimal solutions [147]. The policy gradient-based RL method (e.g., Deep Deterministic Policy Gradient (DDPG)) can handle continuous action spaces [148], which are more suitable for UAV trajectory and transmit power optimization problems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "The authors in [62] propose a multi-agent DRL framework to address the challenge of secure UAV communications in" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "spans": [ + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "text", + "content": "the presence of eavesdroppers. The system model is similar to Part A of Fig. 3, where the UAV server sends confidential information to ground users, and UAV jammers send AN signals to ground eavesdroppers using 3D beamforming. This study designs the Multi-Agent Deep Deterministic Policy Gradient with a continuous action attention mechanism (CAA-MADDPG) to maximize the system's secrecy rate. The attention mechanism dynamically prioritizes relevant agents' observations (e.g., jammers focusing on eavesdroppers) to reduce the exploration space and accelerate convergence, thereby enhancing the system's ability to counteract eavesdropping attempts. 
The simulation results show that CAA-MADDPG achieves a secure rate of " + }, + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "inline_equation", + "content": "4.5\\mathrm{bps / Hz}" + }, + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "text", + "content": " and converges in 1000 episodes with three UAV jammers, outperforming MADDPG (around " + }, + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "inline_equation", + "content": "4\\mathrm{bps / Hz}" + }, + { + "bbox": [ + 307, + 323, + 564, + 502 + ], + "type": "text", + "content": " and 1400 episodes) and DDPG schemes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 509, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 509, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 509, + 564, + 749 + ], + "type": "text", + "content": "However, the study in [62] just considers UAV-to-ground communication (U2G) and assumed the ground devices are static. The work in [63] addresses the challenge of securing bi-directional ground-UAV communications in a dynamic environment with mobile ground devices and eavesdroppers. Different from prior works that assume static ground eavesdroppers [54], [56], this study considers mobile ground eavesdroppers for more practical real-world scenarios. The communication in U2G and ground-to-UAV (G2U) cases is modeled, considering factors such as channel gains and distances. The problem of maximizing the worst-case average secrecy rate is formulated as a constrained MDP (CMDP) under the constraints of UAV flight space, flight speed, energy capacity, anti-collision, and peak transmit power. To solve the CMDP, the authors design a DDPG-based RL algorithm. The algorithm includes three key components: the primary network (actor and critic networks), the target network, and the replay buffer. They also adopt state normalization and exploration noise to speed up the training convergence of the DDPG. 
The proposed joint optimization scheme achieves a secrecy rate" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "spans": [ + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "text", + "content": "over " + }, + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "text", + "content": " higher compared to baselines that optimize only trajectory or only power. In addition, DDPG outperforms DQN by approximately " + }, + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 48, + 55, + 299, + 102 + ], + "type": "text", + "content": " in secrecy rate due to its ability to handle continuous actions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 102, + 299, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 102, + 299, + 162 + ], + "spans": [ + { + "bbox": [ + 48, + 102, + 299, + 162 + ], + "type": "text", + "content": "The DDPGG methods form a fixed mapping from states to actions, which is not suitable for large state spaces that require more exploration and uncertainty [149]. The PPO alleviates this limitation by introducing proximal policy optimization, which allows for more exploration in the large action space." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 163, + 299, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 163, + 299, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 163, + 299, + 341 + ], + "type": "text", + "content": "The study in [64] proposes a hybrid framework (Double Cascade Correlation Network (DCCN) + PPO) to maximize the secrecy capacity. DCCN bypasses the need for labeled training data by cascading two neural networks to maximize the secrecy channel rate. The PPO dynamically adjusts the UAV's position by using clipped surrogate objectives to stabilize policy updates and advantage estimation to prioritize high-reward actions. Simulation results show that the proposed scheme (DCCN + PPO) achieves an average secrecy rate of 0.73 bps/Hz, outperforming the benchmarks DCCN + DDPG (0.67 bps/Hz) and Random RIS + PPO (0.13 bps/Hz). However, the average secrecy continues to decline when the transmit power is higher than 2 W, since the jamming signals transmitted by the secondary source against the eavesdropper will also affect the primary users." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 342, + 299, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 299, + 651 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 299, + 651 + ], + "type": "text", + "content": "The study in [64] considered only the use of one UAV to assist in secure communication. However, in low-altitude economic networks, it may be more important for multiple UAVs to collaborate to implement anti-eavesdropping strategies. The study in [65] considers a system model treats all secondary transmitters and multiple UAV jammers as multi-agents. A Multi-Agent PPO algorithm combined with Long Short-Term Memory (LSTM) networks, named MAPPO-LSTM, is proposed to maximize the secure communication rate by jointly optimizing the UAV trajectory, transmission power, and energy harvesting coefficients. 
The problem is formulated as a nonconvex MDP consisting of an action space, state space, observation, and reward (which consists of the sum of the secure communication rate, SINR information, and battery capacity). The MAPPO algorithm introduces counterfactual baselines to address the credit assignment problem in centralized learning and combines with the LSTM network to enhance the learning capability of sequential sample data. Compared to benchmark schemes MAPPO and MADDPG, the proposed MAPPO-LSTM method achieved around " + }, + { + "bbox": [ + 48, + 342, + 299, + 651 + ], + "type": "inline_equation", + "content": "17\\% - 20\\%" + }, + { + "bbox": [ + 48, + 342, + 299, + 651 + ], + "type": "text", + "content": " higher average secrecy rate in large-scale scenarios, with convergence speeds 1.37 times and 1.93 times faster, respectively. In addition, the reward is sensitive to the discount factor, where setting factor to 0.99 enables faster and more stable convergence. Deviations from this value result in more pronounced fluctuations in the reward and secrecy rate." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 653, + 299, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 653, + 299, + 747 + ], + "spans": [ + { + "bbox": [ + 48, + 653, + 299, + 747 + ], + "type": "text", + "content": "Lesson Learned. The RL has emerged as a powerful yet challenging tool for anti-eavesdropping in UAV-assisted secure communications. A key lesson is that multi-agent cooperation significantly enhances security compared to single-agent approaches, enabling adaptive trajectory control, power allocation, and jamming coordination in dynamic environments. 
However, scalability and convergence efficiency remain critical bottlenecks, especially in high-dimensional, time-" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 311, + 55, + 562, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 55, + 562, + 222 + ], + "spans": [ + { + "bbox": [ + 311, + 55, + 562, + 222 + ], + "type": "text", + "content": "varying settings, as many studies unrealistically assume perfect channel information, and deep RL's convergence time leaves the system vulnerable before optimization completes. Furthermore, key limitations demand further attention, such as RL's computational complexity, which restricts its use in resource-limited settings requiring real-time security, and its sensitivity to hyperparameter tuning that requires meticulous configuration to ensure optimal performance. Future advancements should focus on developing generalizable and robust learning architectures that can dynamically adapt to evolving threats while maintaining computational feasibility, addressing practical deployment challenges, exploring hybrid approaches, prioritizing security in system design, and balancing security performance with energy consumption." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 311, + 223, + 562, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 223, + 562, + 341 + ], + "spans": [ + { + "bbox": [ + 311, + 223, + 562, + 341 + ], + "type": "text", + "content": "Deep learning, with its strong learning capabilities, parallel processing, and comprehensive reasoning [150]–[152], has huge potential to enhance anti-eavesdropping strategies in UAV communications, especially in environments with rapidly changing conditions and complex interactions [153]. 
Given the intricate problem of UAV trajectory variation and its nonlinear characteristics in time and space [154], [155], deep learning networks, such as neural networks and generative models, are emerging as potential solutions to improve the security and performance of UAV communication systems." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 311, + 342, + 562, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 342, + 562, + 567 + ], + "spans": [ + { + "bbox": [ + 311, + 342, + 562, + 567 + ], + "type": "text", + "content": "The study in [66] explores the use of deep learning to optimize UAV deployment and jamming strategies against eavesdroppers to maximize the secrecy rate in the complete CSI scenario. The optimization problem is decomposed into two layers: the inner layer optimizes jamming power for a fixed UAV location, and the outer layer optimizes UAV deployment. The inner problem is solved using a bi-section search algorithm, while the outer problem is addressed using a deep neural network (DNN) to approximate the optimal UAV deployment. The DNN is designed as a fully connected structure, which includes an input layer, two hidden layers, and an output layer, as shown in part A of Fig. 5. The DNN is trained using a dataset generated by simulating different UAV deployments and corresponding secrecy rates. The final optimal deployment of the UAV can be approximated when the mean square error of weights between neurons is minimized. The DNN model achieves an average distance error of 2.2 meters compared to the optimal deployment found by the exhaustive search baseline." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 569, + 562, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 569, + 562, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 569, + 562, + 747 + ], + "type": "text", + "content": "The fully connected neural network used in [66] is suited for problems where inputs and outputs are fixed-dimensional vectors without inherent spatial or sequential relationships [150]. Moreover, convolutional neural networks (CNNs) and recurrent neural networks (RNNs) can also contribute to antieavesdropping. In contrast to fully connected networks, CNNs are particularly effective for exploring spatial features from images or spatial maps [156]. RNNs, on the other hand, focus on handling sequential data by maintaining a memory of previous inputs through recurrent connections [150]. The authors in [67] propose a CNN-LSTM-based secure efficiency map (SEM) framework, which is constructed by calculating each subarea's security-efficiency index using a weighted exponential coefficient to combine normalized secure spectrum efficiency (secrecy rate per unit bandwidth) and secure energy" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 64, + 92, + 547, + 302 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 83 + ], + "type": "text", + "content": "TABLE VI SUMMARY OF DEEP LEARNING FOR ANTI-EAVESDROPPING STRATEGY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT 
PROS AND CONS RESPECTIVELY." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 92, + 547, + 302 + ], + "lines": [ + { + "bbox": [ + 64, + 92, + 547, + 302 + ], + "spans": [ + { + "bbox": [ + 64, + 92, + 547, + 302 + ], + "type": "table", + "html": "
TechniquesReferenceAlgorithmPros & Cons
Neural network model[66]DNN● Use DNN to optimize UAV deployment and jamming strategies for secure communication\n● The DNN model reduces the complexity of exhaustive searches\n● Rely on complete statistical channel knowledge\n● require intensive resources to generate a large amount of training data
[67]CNN-LSTM● CNN-LSTM-based SEM prediction for dynamic secure UAV trajectory optimization\n● Efficient spatiotemporal feature extraction via CNN-LSTM\n● Assume fixed UAV height and passive eavesdropper\n● Training CNN-LSTM network requires a substantial amount of data
[68]FL-DNN● FL-DNN-RL integration (FairLearn) for maximizing fairness in secrecy rates\n● Collaborative learning via FL improves generalization in anti-eavesdropping strategies\n● Involving multiple learning mechanisms requires significant computational resources\n● Assuming perfect CSI and eavesdropper localization may be impractical
Generative AI model[69]MD-GAN● MD-GAN with unknown CSI as model weights\n● Adapt to dynamic environments via gradient-based training\n● Do not require knowledge of the eavesdropper's detection threshold\n● Training a GAN can be computationally intensive
[70]DD-GAN● DD-GAN uses genetic algorithm-generated datasets for GAN training\n● Achieve an effective trade-off between covert rate and detection probability\n● Training relies on the quality and quantity of the genetic algorithm-generated data
[71]GDMTD3● GDMTD3 integrates generative diffusion models into TD3\n● Handle high-dimensional action spaces to adapt mobile eavesdroppers\n● Computational complexity from diffusion denoising steps
", + "image_path": "fce984a856adf58da26013e00ddd120d464b283587d49890dccfd1c7c26d073d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 323, + 301, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 323, + 301, + 441 + ], + "spans": [ + { + "bbox": [ + 45, + 323, + 301, + 441 + ], + "type": "text", + "content": "efficiency (secrecy rate per unit power). Historical SEMs are fed into a CNN-LSTM network to predict future SEMs by leveraging spatial-temporal feature extraction and time-series correlation. Based on predicted SEMs, a trajectory planning algorithm dynamically guides the UAV to subareas with the highest security-efficiency indices. The proposed SEM-enabled trajectory planning achieves an average security-efficiency index of 0.81, outperforming baseline schemes (e.g., static trajectory [142] or non-predictive methods [62], [157]) by over " + }, + { + "bbox": [ + 45, + 323, + 301, + 441 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 45, + 323, + 301, + 441 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "spans": [ + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "content": "Previous deep learning-based architectures [66], [67] are centralized, lacking collaboration and knowledge sharing among UAVs, while also facing challenges in privacy preservation and scalability. To address these limitations and optimize secrecy rate maximization under constraints such as UAV mobility, power budgets, and scheduling fairness, the authors in [68] propose a federated learning (FL)-based framework (FairLearn). As shown in part B of Fig. 
5, the FairLearn employs three learning modules: (1) Module-D uses RL to dynamically generate training datasets by exploring UAV trajectories, power allocation, and scheduling policies; (2) Module-P employs a DNN trained on these datasets to predict optimal 3D trajectory, transmit power, and user scheduling, maximizing proportional fairness in secrecy rates (defined as the difference between legitimate UAV-user rates and eavesdropper rates); (3) Module-C applies FL to aggregate DNN models across UAVs, enabling collaborative learning while preserving data privacy. Simulation results show that FairLearn's secrecy rate is " + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "inline_equation", + "content": "26.6\\%" + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "content": " higher than BCD at 1.4W transmit power. After 100s of execution, FairLearn achieves " + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "inline_equation", + "content": "14.34\\%" + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "inline_equation", + "content": "24.56\\%" + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "inline_equation", + "content": "108\\%" + }, + { + "bbox": [ + 45, + 445, + 301, + 709 + ], + "type": "text", + "content": " higher secrecy rates than BCD, MAQ, and QCQP baselines, respectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 712, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 712, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 712, + 301, + 750 + ], + "type": "text", + "content": "It is worth noting that UAVs can only obtain limited prior environmental information without knowing perfect channel information and the eavesdropper's detection threshold or ex" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 323, + 564, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 323, + 564, + 441 + ], + "spans": [ + { + "bbox": [ + 307, + 323, + 564, + 441 + ], + "type": "text", + "content": "act location. Some previous methods [59], [65], [68] may find it difficult to solve the optimization problem in such scenarios. In contrast, the generative adversarial network (GAN) has emerged as a new model for solving optimization problems with limited prior information [158], [159]. GAN can effectively model and approximate unknown distributions (such as channel coefficients, detection thresholds, and environmental parameters) through adversarial learning, where the generator continuously improves its strategy by learning from the feedback from the discriminator [158]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 307, + 449, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 449, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 449, + 564, + 750 + ], + "type": "text", + "content": "The work in [69] addresses challenges related to partial channel distribution information and unknown eavesdropper detection thresholds by proposing a model-driven GAN (MDGAN) framework. The unknown channel coefficients and detection thresholds are treated as trainable weights in the MD-GAN. 
The MD-GAN transforms the joint trajectory and power optimization problem into a dynamic game between a generator (UAV) and a discriminator (eavesdropper), where the UAV acts as a jammer to protect secondary users from eavesdroppers. The generator optimizes the UAV's 3D trajectory and jamming power, while the discriminator evaluates detection errors. Then, a GAN-based joint trajectory and power optimization (GAN-JTP) algorithm is developed to achieve Nash equilibrium (i.e., maximizing the covert rate and the probability of detection errors). As shown in part C of Fig. 5, the GAN-JTP algorithm consists of two stages: network learning and network training. In the network learning stage, the generator optimizes the UAV's trajectory and transmit power based on the current state and environment. In the network training stage, the generator and discriminator are alternately trained using gradient backpropagation to update their weights. Simulation results show that increasing the training of the discriminator accelerates the convergence of the generator (e.g., when the training step is 10, convergence is achieved within 30 iterations, compared to 89 iterations when" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 69, + 249, + 138 + ], + "blocks": [ + { + "bbox": [ + 51, + 59, + 152, + 68 + ], + "lines": [ + { + "bbox": [ + 51, + 59, + 152, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 59, + 152, + 68 + ], + "type": "text", + "content": "Part A. 
DNN-based Architecture" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 69, + 249, + 138 + ], + "lines": [ + { + "bbox": [ + 51, + 69, + 249, + 138 + ], + "spans": [ + { + "bbox": [ + 51, + 69, + 249, + 138 + ], + "type": "image", + "image_path": "44334917753d0735f58e494eaeb14ab8b73c2c003ece0c5817ff07e552328506.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 256, + 69, + 557, + 137 + ], + "blocks": [ + { + "bbox": [ + 256, + 59, + 365, + 68 + ], + "lines": [ + { + "bbox": [ + 256, + 59, + 365, + 68 + ], + "spans": [ + { + "bbox": [ + 256, + 59, + 365, + 68 + ], + "type": "text", + "content": "Part B. FL-DNN-based Architecture" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 256, + 69, + 557, + 137 + ], + "lines": [ + { + "bbox": [ + 256, + 69, + 557, + 137 + ], + "spans": [ + { + "bbox": [ + 256, + 69, + 557, + 137 + ], + "type": "image", + "image_path": "299abeaa978c56d61eea7db4a1382c43b7e456df3bd4d429ea25d378ff930e93.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 51, + 156, + 249, + 316 + ], + "blocks": [ + { + "bbox": [ + 52, + 145, + 165, + 154 + ], + "lines": [ + { + "bbox": [ + 52, + 145, + 165, + 154 + ], + "spans": [ + { + "bbox": [ + 52, + 145, + 165, + 154 + ], + "type": "text", + "content": "Part C. 
MD-GAN-based Architecture" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 156, + 249, + 316 + ], + "lines": [ + { + "bbox": [ + 51, + 156, + 249, + 316 + ], + "spans": [ + { + "bbox": [ + 51, + 156, + 249, + 316 + ], + "type": "image", + "image_path": "7550af879c50bedad2e22d1c77a418e5ebe094455580bef1f05b38b4074d674c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 258, + 156, + 557, + 227 + ], + "blocks": [ + { + "bbox": [ + 258, + 145, + 370, + 154 + ], + "lines": [ + { + "bbox": [ + 258, + 145, + 370, + 154 + ], + "spans": [ + { + "bbox": [ + 258, + 145, + 370, + 154 + ], + "type": "text", + "content": "Part D. DD-GAN-based Architecture" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 258, + 156, + 557, + 227 + ], + "lines": [ + { + "bbox": [ + 258, + 156, + 557, + 227 + ], + "spans": [ + { + "bbox": [ + 258, + 156, + 557, + 227 + ], + "type": "image", + "image_path": "12a7ffca05ab706e4c5b6609a65fdf2301b70f22019495c09b13cae13284d61e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 258, + 245, + 557, + 316 + ], + "blocks": [ + { + "bbox": [ + 256, + 235, + 369, + 243 + ], + "lines": [ + { + "bbox": [ + 256, + 235, + 369, + 243 + ], + "spans": [ + { + "bbox": [ + 256, + 235, + 369, + 243 + ], + "type": "text", + "content": "Part E. 
GDMTD3-based Architecture" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 258, + 245, + 557, + 316 + ], + "lines": [ + { + "bbox": [ + 258, + 245, + 557, + 316 + ], + "spans": [ + { + "bbox": [ + 258, + 245, + 557, + 316 + ], + "type": "image", + "image_path": "455a544780c102ab3732612d2d11ccdd816c55ddf36f38780acd13758b685f1b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 329, + 564, + 394 + ], + "lines": [ + { + "bbox": [ + 45, + 329, + 564, + 394 + ], + "spans": [ + { + "bbox": [ + 45, + 329, + 564, + 394 + ], + "type": "text", + "content": "Fig. 5. The overall architecture illustrates various deep learning-based architectures designed to enhance anti-eavesdropping capabilities in UAV deployment scenarios. Part A presents a DNN-based architecture that processes air-ground and ground-ground channel states to determine UAV deployment. Part B shows an FL-DNN-based architecture, incorporating modules for reinforcement learning, DNN-based feature mapping, and FL. Part C depicts an MD-GAN-based architecture, where a generator produces trajectories and power outputs based on location and environment status, while a discriminator evaluates the decisions. Part D introduces a DD-GAN-based architecture, focusing on generating jamming solutions to maximize covert rates, with a discriminator distinguishing between jamming and non-jamming solutions. Part E illustrates a GDMTD3-based architecture, utilizing an experience replay buffer and diffusion reverse process to optimize UAV deployment strategies." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "spans": [ + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "text", + "content": "the training step is 1). 
For a flight period of 100 seconds, the GAN-JTP algorithm achieves a " + }, + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "inline_equation", + "content": "0.47\\%" + }, + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "text", + "content": " increase in covert rate with a " + }, + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "inline_equation", + "content": "0.15\\%" + }, + { + "bbox": [ + 45, + 414, + 301, + 460 + ], + "type": "text", + "content": " reduction in detection error probability compared to the BCD-based scheme [160]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 461, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 461, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 461, + 301, + 723 + ], + "type": "text", + "content": "The MD-GAN [69] relies on model-driven methods where the unknown channel information and detection threshold are treated as trained weights. Differently, the authors in [70] propose a data-driven GAN (DD-GAN) framework that focuses on generating data consisting of environmental parameters and optimal solutions to train the GAN. Specifically, the DD-GAN transforms the optimization process into an interactive game between the UAV and eavesdropper, where the UAV aims to maximize the covert rate, and the eavesdropper aims to detect the presence of covert communication. To address the non-convexity and lack of eavesdropper detection threshold information in the optimization process, the DD-GAN trains a generator (UAV) and discriminator (eavesdropper) adversarially, using genetic algorithm-generated samples as prior data, as shown in part D of Fig. 5. The generator produces power and trajectory solutions, while the discriminator evaluates the detectability. The loss function of the discriminator is designed to maximize the probability of correctly identifying real data and minimize the probability of being fooled by generated data. 
The generator's loss function aims to maximize the probability that the generated data is mistaken for real data by the discriminator." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "Besides GANs [69], [70], another generative model, the diffusion model, has advanced the effective representation" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "spans": [ + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "text", + "content": "of multi-dimensional data distributions [161]. The diffusion model can better capture the complex dynamics and the tradeoff in the multi-objective optimization problem concerning secure communication [112]. For example, The diffusion model captures complex state-action distributions, enabling adaptive beamforming and UAV repositioning under eavesdropper mobility. To tackle dynamic environments and high-dimensional action spaces in secure communication and energy efficiency multi-objective optimization problem, the authors in [71] propose GDMTD3, a Twin Delayed Deep Deterministic Policy Gradient (TD3) algorithm enhanced with generative diffusion models. Key innovations include integrating diffusion-based reverse processes into the actor network for robust policy generation and addressing continuous action spaces, as shown in part E in Fig. 5. The training process of GDMTD3 involves initializing the online critic and actor networks, interacting with the environment, and updating the network parameters based on the collected experiences. 
The actor network uses a generative diffusion model to sample actions, while the critic networks evaluate the actions using twin critic networks to reduce overestimation bias. Simulation results show that GDMTD3 outperforms DRL-based benchmarks (including PPO, TD3, and DDPG), achieving about " + }, + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "text", + "content": " higher cumulative rewards and around " + }, + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "inline_equation", + "content": "21\\%" + }, + { + "bbox": [ + 307, + 414, + 564, + 738 + ], + "type": "text", + "content": " higher average secrecy rate than TD3. In addition, when the number of UAVs increases from 4 to 8, the average secrecy rate increases accordingly. However, increasing the number of UAVs from 8 to 16 raises" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 51, + 92, + 559, + 275 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "type": "text", + "content": "TABLE VII SUMMARY OF AUTHENTICATION FOR COMMUNICATION CONFIDENTIALITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 51, + 92, + 559, + 275 + ], + "lines": [ + { + "bbox": [ + 51, + 92, + 559, + 275 + ], + "spans": [ + { + "bbox": [ + 51, + 92, + 559, + 275 + ], + "type": "table", + "html": "
TechniquesReferenceAlgorithmPros & Cons
PUFs-based authentication[72]PUF\\(s^1\\)PUF-based dynamic session key generation and mutual authentication protocol\n✓ Lightweight design with no stored secrets\n✗ Potential overhead during temporary identity updates
[73]PUF-fuzzy extractorPUF-fuzzy extractor mutual authentication with TS-based dynamic session adaptation\n✓ Dynamic session time adaptation minimizes idle periods and enhancing security\n✗ Higher computational cost due to fuzzy extractors
[74]PUFs-fuzzy extractor-AEGISIntegration of PUFs, fuzzy extractors, and AEGIS for mutual authentication\n✓ The proposed password/biometric update mechanism reduces server dependency\n✗ Multiple cryptographic operations and protocols may be cause delay in the implementation
Channel based authentication[75]Rician channelPhysical-layer fingerprinting authentication based on Rician channel characteristics\n✓ Optimal power allocation balances data, AN, and authentication tag transmission\n✗ Reliance on encrypted tags requires high demand on UAV processing capabilities
[76]Rayleigh channelSNR difference-based PLA scheme\n✓ The SNR-based design can be implemented without additional hardware infrastructure\n✗ The simplified Rayleigh channel model may limit to real-world propagation environments
[77]Rayleigh/Rician channelsAD metric2for authentication under Rayleigh/Rician channels\n✓ AD metric-based method improves the detection accuracy of authentication\n✓ Detailed analysis of authentication performance under different propagation conditions\n✗ Computational complexity in Rician channels due to hypergeometric functions
", + "image_path": "a098c7255a5103f909ef1e57c6ad04b2fe83cf8cff00c95130989f6352c468f0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 47, + 275, + 553, + 292 + ], + "lines": [ + { + "bbox": [ + 47, + 275, + 553, + 292 + ], + "spans": [ + { + "bbox": [ + 47, + 275, + 553, + 292 + ], + "type": "text", + "content": "Physical Unclonable Functions (PUFs) are hardware-based security primitives that exploit inherent and unique physical variations in devices to generate unclonable and unpredictable responses for communication authentication." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 47, + 292, + 519, + 301 + ], + "lines": [ + { + "bbox": [ + 47, + 292, + 519, + 301 + ], + "spans": [ + { + "bbox": [ + 47, + 292, + 519, + 301 + ], + "type": "text", + "content": "2Authentication Distance (AD) is a metric proposed in [77] to distinguish legitimate and illegitimate signals for communication authentication." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 323, + 301, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 323, + 301, + 346 + ], + "spans": [ + { + "bbox": [ + 45, + 323, + 301, + 346 + ], + "type": "text", + "content": "energy consumption but only marginally improves secrecy rates, highlighting a performance-energy trade-off." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 346, + 300, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 346, + 300, + 466 + ], + "spans": [ + { + "bbox": [ + 45, + 346, + 300, + 466 + ], + "type": "text", + "content": "Lesson Learned A key lesson learned is that deep learning, particularly through advanced architectures such as GANs [69], [70] and diffusion models [71], can address complex, dynamic environments with partial channel state information and unknown eavesdropper locations, while demonstrating superior performance over traditional methods [59], [65], [66]. These approaches demonstrate that deep learning not only strengthens the resilience of secure communications but also enables autonomous, real-time decision-making to counteract evolving eavesdropping threats in UAV networks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 482, + 189, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 482, + 189, + 493 + ], + "spans": [ + { + "bbox": [ + 45, + 482, + 189, + 493 + ], + "type": "text", + "content": "B. Communication Authentication" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 497, + 300, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 497, + 300, + 651 + ], + "spans": [ + { + "bbox": [ + 45, + 497, + 300, + 651 + ], + "type": "text", + "content": "In the LAENet, as UAVs operate in open environments and rely on wireless communication, they are highly vulnerable to security threats such as node capture and man-in-the-middle attacks [46]. Ensuring secure and reliable authentication between UAVs and ground stations/users or among UAVs is critical to preventing unauthorized access [52], [162]. Traditional cryptographic authentication schemes often impose significant computational and memory overheads and incur considerable lantency, making them unsuitable for resource-constrained UAVs [163]. 
Recently, advancements such as PUFs and Physical-layer Authentication (PLA) mechanisms have opened new possibilities for lightweight and effective authentication in the LAENet."
                        }
                    ]
                }
            ],
            "index": 9
        },
        {
            "bbox": [
                45,
                652,
                301,
                750
            ],
            "type": "text",
            "angle": 0,
            "lines": [
                {
                    "bbox": [
                        45,
                        652,
                        301,
                        750
                    ],
                    "spans": [
                        {
                            "bbox": [
                                45,
                                652,
                                301,
                                750
                            ],
                            "type": "text",
                            "content": "PUFs are a class of hardware security primitives that leverage the inherent manufacturing variations (such as variations in circuit delay or RF properties) in semiconductor devices to generate unique and unpredictable responses [164]. When a specific input is applied to a PUF, the device generates a corresponding response, forming a challenge-response pair that is unique to this device [164]. Such uniqueness and unpredictability make PUFs highly resistant to cloning and"
                        }
                    ]
                }
            ],
            "index": 10
        },
        {
            "bbox": [
                307,
                323,
                563,
                382
            ],
            "type": "text",
            "angle": 0,
            "lines": [
                {
                    "bbox": [
                        307,
                        323,
                        563,
                        382
                    ],
                    "spans": [
                        {
                            "bbox": [
                                307,
                                323,
                                563,
                                382
                            ],
                            "type": "text",
                            "content": "tampering, making them a secure means for device authentication and key generation [165]. In addition, employing a PUF in a UAV allows for secure authentication without the need for complex cryptographic operations, making it an efficient solution for resource-constrained scenarios [166]."
                        }
                    ]
                }
            ],
            "index": 11
        },
        {
            "bbox": [
                307,
                386,
                563,
                553
            ],
            "type": "text",
            "angle": 0,
            "lines": [
                {
                    "bbox": [
                        307,
                        386,
                        563,
                        553
                    ],
                    "spans": [
                        {
                            "bbox": [
                                307,
                                386,
                                563,
                                553
                            ],
                            "type": "text",
                            "content": "The work in [72] proposes a lightweight mutual authentication protocol, named SecAuthUAV, for securing UAV-ground station and UAV-UAV communications. 
SecAuthUAV employs PUFs in each UAV to generate a unique, unclonable session key that functions as a non-reproducible fingerprint. The protocol consists of three phases, as shown in Fig. 6. During UAV registration, a challenge-response pair from the UAV's PUF is stored, and a temporary identity is generated. In the UAV-ground station authentication phase, the UAV and ground station authenticate each other using challenge-response pairs and nonces, establish a session key, and update their identities. Lastly, in the UAV-UAV authentication phase, the GS facilitates secure communication by authenticating a second UAV and generates a session key for both UAVs." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "text", + "content": "However, the work in [72] ignores the fact that the noise in PUFs can result in significant deviation in the output for the same input at different time points. In addition, [72] does not adjust the session time after establishing an authenticated session between two parties, which may lead to the active session remaining idle for a long time and thus give an opportunity for an adversary to interfere with the communication link. In light of this, the authors in [73] propose an UAV Authentication with Adaptive Session (UAAS) framework to address these challenges. Firstly, they combine PUFs and fuzzy extractors to address PUF noise. 
The fuzzy extractors consist of two phases: the " + }, + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "inline_equation", + "content": "Gen(.)" + }, + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "text", + "content": " phase creates a key and non-sensitive helper data, and the " + }, + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "inline_equation", + "content": "Rep(.)" + }, + { + "bbox": [ + 307, + 556, + 563, + 748 + ], + "type": "text", + "content": " phase reconstructs the key from a noisy PUF response using the helper data while tolerating minor deviations. Then, the Thompson Sampling (TS)-based scheme is proposed to dynamically adapt the session time." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 49, + 53, + 561, + 244 + ], + "blocks": [ + { + "bbox": [ + 49, + 53, + 561, + 244 + ], + "lines": [ + { + "bbox": [ + 49, + 53, + 561, + 244 + ], + "spans": [ + { + "bbox": [ + 49, + 53, + 561, + 244 + ], + "type": "image", + "image_path": "1a454107197f0583fff2fc6224b7c4e7b0c3d8f3354f603056d3b238606f5780.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 253, + 566, + 301 + ], + "lines": [ + { + "bbox": [ + 45, + 253, + 566, + 301 + ], + "spans": [ + { + "bbox": [ + 45, + 253, + 566, + 301 + ], + "type": "text", + "content": "Fig. 6. The overall architecture of the PUF-based authentication schemes for UAV-GS and UAV-UAV communication in [72]. Part A illustrates the PUF-based authentication process between a UAV and a ground station (GS). 
The UAV sends its ID and a nonce to the GS, which responds with a hash value based on the PUF, UAV ID, and nonce. The UAV then sends a value derived from the PUF and another nonce, and the GS verifies the authentication by comparing hash values. Part B shows the PUF-based authentication between two UAVs (U1 and U2) through the GS. After establishing a session key with the GS, U1 requests a connection to U2. The GS facilitates the authentication by generating a new session key, which is securely shared between U1 and U2." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 319, + 301, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 319, + 301, + 416 + ], + "spans": [ + { + "bbox": [ + 45, + 319, + 301, + 416 + ], + "type": "text", + "content": "TS is a probabilistic approach that balances exploration and exploitation, determining the session time based on the fraction of busy time to minimize idle periods and reduce the risk of adversarial interference. Although the security analysis demonstrates that UAAS improves the security level in the mutual authentication mechanism, its throughput is " + }, + { + "bbox": [ + 45, + 319, + 301, + 416 + ], + "type": "inline_equation", + "content": "20.38\\%" + }, + { + "bbox": [ + 45, + 319, + 301, + 416 + ], + "type": "text", + "content": " lower and computational cost is 126 ms higher than the baseline [72] due to security overhead." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 420, + 301, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 420, + 301, + 636 + ], + "spans": [ + { + "bbox": [ + 45, + 420, + 301, + 636 + ], + "type": "text", + "content": "In the LAENet, while establishing mutual authentication between UAVs and ground stations is critical, it is also important to incorporate role-specific access controls for users to ensure communication confidentiality and preventing unauthorized access [167]. The work in [74] proposes an authentication framework PUF-enabled authentication framework for Internet of Drones (PAF-IoD) to establish mutual authentication among users, UAVs, and ground stations. Users need to authenticate with stations to access the stored data or communicate directly with UAVs, where the users' authentication mechanism includes three factors (identity, password, and biometric data). Similar to [73], PAF-IoD uses PUFs and a fuzzy extractor in the authentication process to generate a unique and tamper-proof session key while tolerating the noise in PUFs. Furthermore, the designed authenticated encryption with associative data (AEAD)-based encryption algorithm is utilized for encrypting and decrypting messages exchanged between the user, ground station server, and UAVs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 640, + 301, + 750 + ], + "type": "text", + "content": "In addition to leveraging intrinsic physical properties of hardware for authentication design through PUFs [72]–[74], the characteristics of communication channels can be used for authentication. The PLA mechanism authenticates devices by exploiting the unique physical characteristics of wireless communication channels, such as CSI, received signal strength (RSS), and signal-to-noise ratio (SNR) [168]. 
The main reason is that the wireless channel between two communicating entities exhibits location-specific and time-varying properties" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 319, + 564, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 319, + 564, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 564, + 415 + ], + "type": "text", + "content": "due to multipath propagation, fading, and environmental factors [169]. These diverse physical channel conditions, which provide a robust set of features for authentication, have been investigated in terrestrial communication networks [168]–[170]. Furthermore, the source of received signals can be accurately and promptly detected [170], making PLA particularly advantageous in the dynamic and complex communication environments of the LAENet." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 420, + 564, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 420, + 564, + 589 + ], + "spans": [ + { + "bbox": [ + 308, + 420, + 564, + 589 + ], + "type": "text", + "content": "The authors in [75] leverage the unique properties of the physical layer channel, Rician channel, to develop a PLA for UAV-ground station communication. Given that UAVs receive signals subject to the Rician fading model, the ground station integrates authentication directly into the transmission process. It employs a one-way collision-resistant function (e.g. cryptographic hash function) that combines data symbols with a shared secret key to generate a low-power authentication tag for UAV and seamlessly embeds it into the transmitted signal. The authentication tag is validated by the correlation shaped by the Rician statistical characteristics of the fading channel, i.e., the correlation between the estimated tag (derived from the received signal) and the expected tag (generated using the secret key and decoded data symbols)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 593, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 593, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 307, + 593, + 564, + 750 + ], + "type": "text", + "content": "However, the work in [75] still partially relies on cryptographic tag generation for authentication, which may not be suitable for UAVs with limited processing capabilities. The study in [76] leverages channel characteristics and geographical locations for PLA design, where the SNR differences between consecutive transmissions are utilized as the authentication metric. Specifically, a legitimate transmitter and a jammer have distinct channel variations due to differences in their geographical locations. The UAV authenticates the legitimate transmitter or jammer by formulating a binary hypothesis test based on the SNR difference between two successive transmissions. If the difference falls within a predefined threshold, the transmission is authenticated as from the legitimate" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 79, + 272, + 242 + ], + "blocks": [ + { + "bbox": [ + 71, + 62, + 256, + 73 + ], + "lines": [ + { + "bbox": [ + 71, + 62, + 256, + 73 + ], + "spans": [ + { + "bbox": [ + 71, + 62, + 256, + 73 + ], + "type": "text", + "content": "Part A. 
Channel-based authentication in urban" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 79, + 272, + 242 + ], + "lines": [ + { + "bbox": [ + 78, + 79, + 272, + 242 + ], + "spans": [ + { + "bbox": [ + 78, + 79, + 272, + 242 + ], + "type": "image", + "image_path": "f2a0eafa0a8185bc1fc9742760bf005b8217a9d36dcaad8f6cc2e5c56af12ae0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 70, + 280, + 279, + 443 + ], + "blocks": [ + { + "bbox": [ + 71, + 263, + 269, + 274 + ], + "lines": [ + { + "bbox": [ + 71, + 263, + 269, + 274 + ], + "spans": [ + { + "bbox": [ + 71, + 263, + 269, + 274 + ], + "type": "text", + "content": "Part B. Channel-based authentication in suburban" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 280, + 279, + 443 + ], + "lines": [ + { + "bbox": [ + 70, + 280, + 279, + 443 + ], + "spans": [ + { + "bbox": [ + 70, + 280, + 279, + 443 + ], + "type": "image", + "image_path": "967f281427d80e32166ad3daced3cb08fb45a53cc42cab57a86b73497c509e23.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 457, + 301, + 533 + ], + "lines": [ + { + "bbox": [ + 45, + 457, + 301, + 533 + ], + "spans": [ + { + "bbox": [ + 45, + 457, + 301, + 533 + ], + "type": "text", + "content": "Fig. 7. The overall architecture of the channel-based authentication in urban and suburban environments in [77]. Part A depicts the authentication process in an urban environment under Rayleigh channel conditions. The UAV receiver calculates the SNR, computes the AD, and compares it with a detection threshold to distinguish between legitimate and malicious sensors. 
Part B illustrates the authentication process in a suburban environment, where the UAV receiver performs similar steps to authenticate legitimate sensors and detect malicious ones under Rayleigh channel conditions." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 555, + 300, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 555, + 300, + 651 + ], + "spans": [ + { + "bbox": [ + 45, + 555, + 300, + 651 + ], + "type": "text", + "content": "transmitter; otherwise, it is classified as a jammer. The closed-form expressions for the probability density function of SNR differences, false alarm rate (FAR), and miss detection rate (MDR) are derived under Rayleigh fading channels in single-UAV and dual-UAV scenarios. The non-convex optimization problem of minimizing MDR under FAR constraints is solved using an SCA algorithm, which outperforms the RSS-based baseline [90] by about " + }, + { + "bbox": [ + 45, + 555, + 300, + 651 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 45, + 555, + 300, + 651 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 652, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 652, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 652, + 301, + 749 + ], + "type": "text", + "content": "It is worth noting that study [75] may lack a comprehensive analysis of the UAV-PLA performance under different propagation conditions. Additionally, the detection performance may be further improved with other indicators. As shown in Fig. 7, the work in [77] proposes a novel PLA framework under different propagation conditions, including dense urban and suburban environments modeled by Rayleigh and Rician channels, respectively. 
A new metric, Authentication Distance" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 54, + 564, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 209 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 209 + ], + "type": "text", + "content": "(AD), is proposed as the normalized difference in received SNR between adjacent transmissions. For Rayleigh channels, closed-form expressions for FAR and MDR are derived using convolution and integration-by-parts, while Rician channels employ doubly non-central " + }, + { + "bbox": [ + 307, + 54, + 564, + 209 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 307, + 54, + 564, + 209 + ], + "type": "text", + "content": " distributions to model AD statistics. Similar to study [76], this authentication framework minimizes MDR under FAR constraints. In dense urban settings, MDR depends on path loss and transmitter-UAV geometry. For suburban environments, it incorporates elevation angle-dependent Rician factors and path loss exponents to improve discriminability between legitimate and illegitimate signals. The proposed AD-based method outperforms the SNR-difference baseline [171], achieving 40–60% lower MDR." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 210, + 564, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 210, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 307, + 210, + 564, + 342 + ], + "type": "text", + "content": "Lesson Learned. Leveraging physical-layer characteristics, such as PUFs and channel properties, in conjunction with communication models and optimization algorithms, has proven effective in enhancing authentication accuracy and reducing detection errors. However, some methods also reveal limitations. For instance, the assumptions of ideal channel conditions and the neglect of practical implementation constraints may limit the applicability of the proposed solutions [76], [77]. 
Future research should focus on addressing these limitations by exploring more practical channel models and considering the trade-offs between security and system complexity." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 324, + 357, + 548, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 357, + 548, + 369 + ], + "spans": [ + { + "bbox": [ + 324, + 357, + 548, + 369 + ], + "type": "text", + "content": "IV. COMMUNICATION AVAILABILITY FOR LAENET" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 373, + 419, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 373, + 419, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 373, + 419, + 385 + ], + "type": "text", + "content": "A. Anti-Jamming Strategy" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 389, + 564, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 389, + 564, + 543 + ], + "spans": [ + { + "bbox": [ + 307, + 389, + 564, + 543 + ], + "type": "text", + "content": "Jamming attacks pose significant challenges to communication availability in the LAENet by disrupting legitimate communication links and degrading the performance of aircraft communication networks [79], [172]. As shown in Fig. 10, these attacks can exploit the openness and broadcasting nature of UAV networks, making them particularly vulnerable to interference [79]. Malicious jammers can transmit strong signals that weaken signal strength, degrade signal quality, and increase communication delays, leading to unreliable coverage and potential paralysis of the entire network [172], [173]. This vulnerability underscores the urgent need for effective anti-jamming technologies to ensure reliable communication in the LAENet." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 544, + 564, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 544, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 307, + 544, + 564, + 688 + ], + "type": "text", + "content": "Various anti-jamming strategies have been explored to safeguard the LAENet against malicious jamming, mainly focusing on trajectory adjustment, as well as channel and power control. Overall, by adjusting the trajectory in the spatial domain, an UAV can evade jamming signals while maintaining reliable communication with legitimate devices [80], [173]. Besides the spatial-domain anti-jamming strategy, the UAV can implement a frequency-domain-based anti-jamming scheme. The UAV can select legitimate channels while avoiding jamming signals and control transmit power to minimize energy consumption and latency under jamming attacks [83], [84]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 689, + 564, + 749 + ], + "type": "text", + "content": "Convex optimization methods can be used to adjust the UAV's trajectory to achieve anti-jamming by strategically guiding its movement to reduce interference and enhance communication reliability [80], [173]. 
It provides a systematic and efficient approach to handle the complex, non-convex" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 54, + 92, + 556, + 266 + ], + "blocks": [ + { + "bbox": [ + 51, + 56, + 559, + 83 + ], + "lines": [ + { + "bbox": [ + 51, + 56, + 559, + 83 + ], + "spans": [ + { + "bbox": [ + 51, + 56, + 559, + 83 + ], + "type": "text", + "content": "TABLE VIII SUMMARY OF ANTI-JAMMING STRATEGY FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 54, + 92, + 556, + 266 + ], + "lines": [ + { + "bbox": [ + 54, + 92, + 556, + 266 + ], + "spans": [ + { + "bbox": [ + 54, + 92, + 556, + 266 + ], + "type": "table", + "html": "
TechniquesReferenceAlgorithmPros & Cons
Convex optimization[80]BCD, SCABCD and SCA for UAV 3D trajectory optimization for anti-jamming\n✓ Probabilistic LoS performs well in real-world scenarios such as urban environments\nX High computational complexity may be challenging in resource-constrained environments
[81]SCA, DinkelbachSCA and Dinkelbach algorithm for energy-efficient trajectory optimization under malicious jammers\n✓ Balance between throughput and energy consumption in anti-jamming\nX Assume static and LoS-dominated channels
[82]BCD, SCABCD and SCA for joint UAV trajectory and transmit power optimization under jamming\n✓ Improve throughput by considering transmit power optimization against jammers\nX Assume a fixed UAV altitude and a static channel environment
Multi-agent RL[87]MALQLCollaborative MALQL algorithm for anti-jamming with channel and power allocation\n✓ Accelerate convergence compared to single-agent Q-learning\nX Assume predefined UAV trajectories limits to adaptability
[88]MARLMARL with adversarial pre-training for dynamic and generalized jamming\n✓ Generalize to various jamming patterns via adversarial populations for pre-training\nX Pre-training for generalized jamming may require significant offline resources
[89]MATD3MATD3 algorithm with PER for dynamic resource management under jamming attacks\n✓ Handle high-dimensional continuous action spaces\nX The integration of PER and spectrum sensing may increase the computational complexity
", + "image_path": "ac755b49caad673c5969aa85578ccfc3367cd5587e9664d857f416a9905f81b7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 286, + 301, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 286, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 45, + 286, + 301, + 430 + ], + "type": "text", + "content": "problems that arise when optimizing UAV trajectories and various constraints under malicious jamming conditions [131]. The work in [80] investigates anti-jamming 3D trajectory design for UAV-enabled wireless sensor networks under a probabilistic LoS channel model. The probabilistic LoS model accounts for elevation angle-dependent shadowing effects in urban environments compared with simplified LoS models. The BCD and SCA algorithms are employed to optimize the UAV's horizontal and vertical trajectory, allowing the UAV to move closer to the ground station for improved transmission rates while dynamically adjusting its elevation angle relative to the jammer to mitigate interference." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "spans": [ + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "text", + "content": "However, the anti-jamming trajectory optimization in [80] under the probabilistic LoS model does not consider the energy consumption issue. The study in [81] utilizes SCA and Dinkelbach's algorithm to adjust the UAV's trajectory to avoid areas with jammers while maximizing energy efficiency, which is defined as the ratio of total throughput to propulsion energy consumption during flight. 
Compared to hovering-centric benchmarks, the optimized trajectory reduced energy consumption by " + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "inline_equation", + "content": "82\\%" + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "text", + "content": " while maintaining " + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "inline_equation", + "content": "73.16\\%" + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "text", + "content": " of the sum throughput. It is worth noting that the transmit power of the UAV and station is fixed in [81], whereas power optimization is also an important factor for energy efficiency. The authors in [82] use the SCA and BCD algorithms to maximize throughput by iteratively optimizing power allocation (via convex reformulation of throughput bounds) and UAV trajectory (via slack variables for distance constraints and jamming mitigation) to avoid jamming signals. The proposed scheme achieves " + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 45, + 434, + 301, + 661 + ], + "type": "text", + "content": " higher throughput compared to the \"Line trajectory with fixed power\" baseline." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "type": "text", + "content": "While convex optimization methods [80]–[82] work well for fixed jamming patterns, they may struggle to handle dynamic, intelligent jamming [174] in real-time due to their reliance on global information and the challenges inherent in solving nonconvex problems with increased optimized variables [142]. In contrast, RL and DRL offer significant advantages by enabling autonomous, adaptive decision-making [143], [147]. 
These" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 286, + 564, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 286, + 564, + 502 + ], + "spans": [ + { + "bbox": [ + 307, + 286, + 564, + 502 + ], + "type": "text", + "content": "approaches can continuously adjust to environmental changes, learn from past interactions, and optimize performance in real-time [144], [175]. The RL-based anti-jamming methods have emerged as a promising solution due to their ability to operate without excessive prior information (such as unknown environment, CSI, and jamming mode) [147]. Single-agent RL algorithms have been used in previous works to develop anti-jamming strategies in communication networks by regarding jammers and other legitimate users as part of the environment, including independent anti-jamming channel selection methods [83]–[86]. However, these single-agent approaches may fail to converge when dealing with a large number of agents or a high-dimensional action-state space [87], making them impractical for complex, multi-agent scenarios in the LAENet. To address these limitations, multi-agent RL (MARL) methods have been proposed to allow each agent to make decisions based on local information and exchange data with others (such as observations or model parameters)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 505, + 564, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 505, + 564, + 721 + ], + "spans": [ + { + "bbox": [ + 308, + 505, + 564, + 721 + ], + "type": "text", + "content": "The study in [87] proposes a collaborative multiagent layered Q-learning (MALQL) algorithm for anti-jamming communication in UAV networks by jointly optimizing channel and power allocation to maximize system Quality of Experience (QoE). The problem is modeled as a local interaction Markov game based on the constructed interference graph. 
The MALQL divides the problem into two subgames of channel selection (Layer 1) and power allocation (Layer 2), as shown in part B of Fig. 10. The channel layer uses a graph-based interference model to capture mutual interference among UAVs. Each UAV is represented as a node, and edges are formed between UAVs that are within a predefined interference distance. This model allows UAVs to identify and avoid channels that are being used by neighboring UAVs or jammed by external attackers, thereby reducing the jamming likelihood. The power layer optimizes transmit power to meet rate thresholds. Theoretical analysis confirms that MALQL can converge to a pure strategy Nash equilibrium." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 750 + ], + "type": "text", + "content": "Nevertheless, there are still some issues with the anti-jamming mechanism in [87]. Considering that the rapid mo" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 72, + 199, + 169 + ], + "blocks": [ + { + "bbox": [ + 51, + 57, + 200, + 68 + ], + "lines": [ + { + "bbox": [ + 51, + 57, + 200, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 200, + 68 + ], + "type": "text", + "content": "Part A. 
Overall system model under jamming" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 72, + 199, + 169 + ], + "lines": [ + { + "bbox": [ + 52, + 72, + 199, + 169 + ], + "spans": [ + { + "bbox": [ + 52, + 72, + 199, + 169 + ], + "type": "image", + "image_path": "139af8a9a84b86d4198cc46ae8f1d98f29cd82458280ac122e062d30d4140bb7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 209, + 72, + 361, + 171 + ], + "blocks": [ + { + "bbox": [ + 210, + 57, + 356, + 68 + ], + "lines": [ + { + "bbox": [ + 210, + 57, + 356, + 68 + ], + "spans": [ + { + "bbox": [ + 210, + 57, + 356, + 68 + ], + "type": "text", + "content": "Part B. MALQL-based anti-jamming scheme" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 209, + 72, + 361, + 171 + ], + "lines": [ + { + "bbox": [ + 209, + 72, + 361, + 171 + ], + "spans": [ + { + "bbox": [ + 209, + 72, + 361, + 171 + ], + "type": "image", + "image_path": "4cb234887973d9ac25e079403f546e492bf9527c15b28c61b6fc521af34f7172.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 369, + 79, + 553, + 169 + ], + "blocks": [ + { + "bbox": [ + 368, + 57, + 553, + 76 + ], + "lines": [ + { + "bbox": [ + 368, + 57, + 553, + 76 + ], + "spans": [ + { + "bbox": [ + 368, + 57, + 553, + 76 + ], + "type": "text", + "content": "Part C. 
Population update of pre-training for generalized anti-jamming scheme" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 369, + 79, + 553, + 169 + ], + "lines": [ + { + "bbox": [ + 369, + 79, + 553, + 169 + ], + "spans": [ + { + "bbox": [ + 369, + 79, + 553, + 169 + ], + "type": "image", + "image_path": "fcb150feb0c7dfcec3aa13563eba74bebc6a468a677e0d900fc102f3fcfec36c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 55, + 198, + 301, + 291 + ], + "blocks": [ + { + "bbox": [ + 54, + 179, + 216, + 190 + ], + "lines": [ + { + "bbox": [ + 54, + 179, + 216, + 190 + ], + "spans": [ + { + "bbox": [ + 54, + 179, + 216, + 190 + ], + "type": "text", + "content": "Part D. PER-MATD3-based anti-jamming scheme" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 198, + 301, + 291 + ], + "lines": [ + { + "bbox": [ + 55, + 198, + 301, + 291 + ], + "spans": [ + { + "bbox": [ + 55, + 198, + 301, + 291 + ], + "type": "image", + "image_path": "bc80ee2b36b7d014602c429392c608bf414042951a60d35efc799078722210b7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 198, + 550, + 293 + ], + "blocks": [ + { + "bbox": [ + 466, + 186, + 550, + 194 + ], + "lines": [ + { + "bbox": [ + 466, + 186, + 550, + 194 + ], + "spans": [ + { + "bbox": [ + 466, + 186, + 550, + 194 + ], + "type": "text", + "content": "Actor-Critic network for agent k" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 198, + 550, + 293 + ], + "lines": [ + { + "bbox": [ + 310, + 198, + 550, + 293 + ], + "spans": [ + { + "bbox": [ + 310, + 198, + 550, + 293 + ], + "type": "image", + "image_path": "7c7009c9e038fcda366a5af32ce1c4b450452de01631391440f888fae30fd149.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + 
"type": "image_body" + }, + { + "bbox": [ + 45, + 312, + 564, + 369 + ], + "lines": [ + { + "bbox": [ + 45, + 312, + 564, + 369 + ], + "spans": [ + { + "bbox": [ + 45, + 312, + 564, + 369 + ], + "type": "text", + "content": "Fig. 8. The overall architecture illustrates various reinforcement learning-based anti-jamming schemes designed to enhance communication reliability in UAV-assisted MEC systems under jamming attacks. Part A presents the overall system model, depicting UAVs and jammers interacting within a dynamic environment. Part B shows the MALQL-based anti-jamming scheme, where agents use layered Q-learning to determine actions based on local observations and rewards. Part C depicts the population update mechanism for pre-training a generalized anti-jamming scheme, involving a jammer population, trajectory encoder, and decoder network to optimize jamming policies. Part D introduces the PER-MATD3-based anti-jamming scheme, incorporating a priority experience replay buffer and actor-critic networks to dynamically allocate resources and optimize UAV deployment strategies." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 388, + 300, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 388, + 300, + 687 + ], + "spans": [ + { + "bbox": [ + 45, + 388, + 300, + 687 + ], + "type": "text", + "content": "bility of UAVs may expose them to various and unknown jamming patterns due to frequent transitions to new scenarios, the anti-jamming methods need to be generalized [176], especially in the LAENet. The work [87] randomly initializes strategies and learns from scratch for a particular deployment environment with no pretraining, which may lead to a reduction in the generalization ability of the anti-jamming strategy. 
In light of this, the authors in [88] introduce an adversarial pre-training stage in the proposed two-stage MARL with a decentralized partially observable Markov decision process. Specifically, the adversarial pre-training stage uses a quality-diverse jammer population (e.g., fixed, random, sweeping, statistic, and RL-based jamming) to bootstrap generalized anti-jamming strategies instead of directly initializing the agents with random anti-jamming policies, as shown in part C of Fig. 10. This pre-training ensures that UAVs are not overfitted to specific jamming patterns and can generalize to new jamming attacks in real-world deployments. The pre-trained policies are deployed in the fine-tuning stage, where a graph convolutional-based MARL algorithm is proposed to jointly optimize channel selection and power allocation for anti-jamming similar to [87]. Simulation results show that the proposed solution achieves " + }, + { + "bbox": [ + 45, + 388, + 300, + 687 + ], + "type": "inline_equation", + "content": "20 - 30\\%" + }, + { + "bbox": [ + 45, + 388, + 300, + 687 + ], + "type": "text", + "content": " higher cumulative rewards than collaborative multi-agent Q-learning [177] and independent Q-learning [83] under fixed and sweeping jamming." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 750 + ], + "type": "text", + "content": "Note that previous RL-based anti-jamming strategies [87], [88] mainly rely on the Q-learning method, which is suitable for discrete action spaces but may be limited in dealing with high-dimensional continuous spaces [147], [148]. 
The authors in [89] propose a PER-MATD3 algorithm against jamming" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "text", + "content": "by integrating spectrum-aware channel selection and prioritized experience replay (PER) into an MADRL framework, as shown in part D of Fig. 10. The proposed spectrum-aware intelligent channel selection uses energy detection-based spectrum sensing, enabling UAVs to identify and avoid jammed channels. The TD3 is specifically designed to handle continuous-valued states and actions, where two critic networks, target policy smoothing, and delayed policy updates are used to further stabilize DRL training. By leveraging PER, the agents can learn from high-error experiences, thereby accelerating adaptation to time-varying CSI, imperfect jamming detection, and co-channel interference. By jointly optimizing CPU frequency, bandwidth allocation, and channel selection to minimize the impact of jamming, PER-MATD3 reduces system cost (a linear combination of latency and energy consumption) by approximately " + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "inline_equation", + "content": "16.7\\%" + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "inline_equation", + "content": "9.1\\%" + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 307, + 388, + 564, + 604 + ], + "type": "text", + "content": " compared to the baselines of Q-learning, MATD3-JSC (without PER), and PER-MATD3 (without channel selection), respectively." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 605, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 605, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 605, + 564, + 749 + ], + "type": "text", + "content": "Lesson Learned. Recent advancements in anti-jamming strategies show that intelligent decision-making for trajectory control, channel selection, and power control is essential for effective jamming mitigation. A key takeaway is the successful integration of MARL to develop dynamic and adaptive anti-jamming solutions [75]. By employing intelligent algorithms such as adversarial pre-training and decentralized decision-making, UAV networks can generalize anti-jamming strategies across diverse environments [76], [77]. However, challenges persist in the generalization of these strategies across various jamming types and environmental conditions, as well as balancing the trade-offs between energy consumption," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 68, + 296, + 174 + ], + "blocks": [ + { + "bbox": [ + 51, + 57, + 176, + 67 + ], + "lines": [ + { + "bbox": [ + 51, + 57, + 176, + 67 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 176, + 67 + ], + "type": "text", + "content": "Part A. 
ML-based spoofing detection" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 68, + 296, + 174 + ], + "lines": [ + { + "bbox": [ + 51, + 68, + 296, + 174 + ], + "spans": [ + { + "bbox": [ + 51, + 68, + 296, + 174 + ], + "type": "image", + "image_path": "c26ca86189ba5e15945340467e93f9446f6ab0e20c20d86854d84170b44e6d6a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 198, + 299, + 326 + ], + "blocks": [ + { + "bbox": [ + 54, + 184, + 211, + 194 + ], + "lines": [ + { + "bbox": [ + 54, + 184, + 211, + 194 + ], + "spans": [ + { + "bbox": [ + 54, + 184, + 211, + 194 + ], + "type": "text", + "content": "Part B. Rule and ML-based spoofing detection" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 198, + 299, + 326 + ], + "lines": [ + { + "bbox": [ + 53, + 198, + 299, + 326 + ], + "spans": [ + { + "bbox": [ + 53, + 198, + 299, + 326 + ], + "type": "image", + "image_path": "2ab8ba0297c1868c628e1e48264ec055ad4506c4314985e9ce67835ee4420c3e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 340, + 301, + 432 + ], + "lines": [ + { + "bbox": [ + 45, + 340, + 301, + 432 + ], + "spans": [ + { + "bbox": [ + 45, + 340, + 301, + 432 + ], + "type": "text", + "content": "Fig. 9. The overall framework of ML and rule-based spoofing detection for GPS spoofing detection in the LAENet. Part A depicts an ML-based spoofing detection mechanism in [93], where multiple CNN classifiers are trained with updated sample weights to form an integrated classification model. Each CNN transfers its optimized parameters to subsequent classifiers, enhancing the model's robustness. Part B presents a hybrid rule and ML-based approach in [94], where statistical analysis of path losses between UAVs and multiple base stations (BSs) is performed by edge servers. 
The analyzed data is processed through MLPs to generate individual predictions, which are aggregated to produce a final spoofing detection decision." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 453, + 300, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 453, + 300, + 525 + ], + "spans": [ + { + "bbox": [ + 45, + 453, + 300, + 525 + ], + "type": "text", + "content": "latency, and throughput. Future research could delve into the integration of more adaptive learning frameworks (such as deep learning) into the LAENet for anti-jamming, enabling it to better manage partial or imperfect environmental observations for low-latency, real-time decision-making in multi-UAV systems." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 541, + 134, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 541, + 134, + 553 + ], + "spans": [ + { + "bbox": [ + 46, + 541, + 134, + 553 + ], + "type": "text", + "content": "B. Spoofing Defense" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 557, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 557, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 557, + 301, + 750 + ], + "type": "text", + "content": "In the LAENet, the openness of A2G transmission channels and the dynamic nature of low-altitude aircraft networks make them particularly susceptible to identity-based spoofing attacks [50]. In such attacks, a malicious entity impersonates a legitimate transmitter using falsified identity information, such as a spoofed media access control address, to gain unauthorized access to the network [52]. Once authenticated, the spoofer can disrupt communications among aircraft by launching more severe attacks, such as rogue access point infiltration and denial-of-service attacks, ultimately leading to network congestion and service outages [75]. 
Given the limitations of conventional authentication methods that rely on complex cryptographic protocols [52], PLA offers a promising alternative by leveraging the inherent and unique physical characteristics of wireless transmissions for the LAENet, which is introduced in Section III-B. Overall, this type of PLA can defend against spoofing" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 55, + 563, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 55, + 563, + 102 + ], + "spans": [ + { + "bbox": [ + 307, + 55, + 563, + 102 + ], + "type": "text", + "content": "attacks in the LAENet by exploiting the unique characteristics of the wireless channel (such as RSS, Rayleigh channel, and Rician channel) to identify and separate between legitimate devices and spoofers." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 102, + 564, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 102, + 564, + 389 + ], + "spans": [ + { + "bbox": [ + 307, + 102, + 564, + 389 + ], + "type": "text", + "content": "The work in [90] proposes a PLA framework to detect spoofing attacks by exploiting spatial correlations of RSS in A2G channels. The key idea is that the RSS from a legitimate transmitter will remain relatively consistent due to its fixed location, while the RSS from a spoofer will vary significantly because of its different position and channel conditions. Thus, the UAV receiver can perform a hypothesis test to authenticate incoming signals. If the RSS distance between the current signal and a previously authenticated signal is below a predefined threshold, the signal is accepted as legitimate. Otherwise, it is flagged as a spoofing attempt. However, the work [90] is considered under an ideal transmission scenario, where the propagation environment is perfectly exempted from external interference. 
To address this limitation, the authors in [91] develop a PLA framework that accounts for channel randomness and interference uncertainty. First, they model the G2A link as a Rayleigh fading channel. Then, they introduce jamming signals as external interference. By modeling the jamming power statistically and incorporating it into the analysis of detected power differences, if the difference in power exceeds a predefined threshold, it is identified as a spoofing attempt. Thus, even in real-world scenarios with interference, the proposed framework can better differentiate between natural channel fading and anomalies caused by spoofing attacks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 389, + 564, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 389, + 564, + 640 + ], + "spans": [ + { + "bbox": [ + 307, + 389, + 564, + 640 + ], + "type": "text", + "content": "In addition to using the statistical properties of the Rayleigh channel to design PLA against spoofing in environments with multipath fading (such as urban areas), the channel characteristics in suburban environments should also be considered. To address this, the work [77] proposes a PLA framework to counter spoofing attacks in both urban (Rayleigh channel) and suburban (Rician channel) environments. As mentioned earlier (in Section III-B), a new metric AD is devised to distinguish between legitimate signals and spoofing signals based on differences in channel randomness and geographical factors, such as elevation angles and distances. Adopting the unique fading characteristics of Rayleigh and Rician channels makes it statistically difficult for a spoofer to accurately mimic a legitimate signal. By considering elevation angles and distances in channel modeling, it ensures that a spoofer cannot easily replicate a legitimate signal even if the spoofer knows the legitimate transmitter's location. 
Simulation results show that the probability of a successful spoofing attack is significantly reduced compared to the baseline [171], where the spoofing miss detection probability drops to 0.014 in suburban environments and 0.371 in dense urban areas." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 641, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 641, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 641, + 564, + 748 + ], + "type": "text", + "content": "In the LAENet, in addition to being vulnerable to identity-based spoofing attacks, aircrafts are also susceptible to signal spoofing attacks from the Global Navigation Satellite System (GNSS), particularly GPS spoofing, which poses a significant security threat by generating and transmitting counterfeit satellite signals resulting in severe positioning deviations [25]. By interfering with or suppressing legitimate GNSS signals, attackers can manipulate UAV locations in an imperceptible manner to mislead UAVs, causing deviations from intended" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 92, + 558, + 275 + ], + "blocks": [ + { + "bbox": [ + 51, + 56, + 558, + 83 + ], + "lines": [ + { + "bbox": [ + 51, + 56, + 558, + 83 + ], + "spans": [ + { + "bbox": [ + 51, + 56, + 558, + 83 + ], + "type": "text", + "content": "TABLE IX SUMMARY OF SPOOFING DEFENSE FOR COMMUNICATION AVAILABILITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 92, + 558, + 275 + ], + "lines": [ + { + "bbox": [ + 52, + 92, + 558, + 275 + ], + "spans": [ + { + "bbox": [ + 52, + 92, + 558, + 275 + ], + "type": "table", + "html": "
TechniquesReferenceAlgorithmPros & Cons
PLA[90]RSSSpatial correlations of RSS distances in PLA against spoofing attacksUse RSS-based channel characteristics to reduce PLA computational complexityAssume an ideal transmission scenario without external interference
[91]Rayleigh channelDefend against spoofing attacks by considering channel randomness and jammingSimultaneously address spoofing and jamming attacks via PLAAssume static UAVs and a known jamming distribution
[77]Rayleigh and Rician channelsAD-based PLA for spoofing defense under Rayleigh and Rician channelsProvide a thorough analysis of spoofer identification in urban and suburban environmentsAssume perfect CSI in channel modeling
GNSS spoofing detection[92]Rule-based detectionCombine cooperative localization mechanism with Stackelberg game against spoofing attacksSpoofing detection is based on neighboring UAV signal sources without predefined thresholdsExtending to larger UAV groups may require complex adjustments
[93]ML-based detectionImproved AdaBoost-CNN for multi-modal spoofing attack identificationHigh accuracy in identifying spoofing attacks with limited data samplesDependence on predefined signal features may lead to model overfitting
[94]Rule & ML-based detectionMLP and statistical feature extraction on path-loss data for detecting GPS spoofingNo additional hardware/energy burden on UAVsRobust performance under sparse base station coverageSpoofing detection performance degrades in areas with unstable cellular signals
", + "image_path": "35f27e74f7c34c861495fa1c76195ab51abea9e536ca4fcaa9d1d7ca44689523.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 296, + 301, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 296, + 301, + 464 + ], + "spans": [ + { + "bbox": [ + 45, + 296, + 301, + 464 + ], + "type": "text", + "content": "flight paths, violations of no-fly zone regulations, or increased collision risks [46]. Given a critical role of GNSS in UAV operations, effective detection and mitigation strategies for spoofing attacks are essential to ensure flight safety and prevent security breaches in the LAENet. Currently, studies on signal spoofing attack recognition in the LAENet mostly focuses on recognizing GNSS spoofing attack detection, which primarily falls into two categories with respect on rule-based and ML-based methods [19], [25]. Rule-based detection methods typically assess the relative distance and positional deviations of UAVs to determine if they are under GNSS spoofing attack. On the other hand, the ML-based methods pay attention to recognize different spoofing types by learning the characteristics of received signals." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 473, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 473, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 473, + 301, + 750 + ], + "type": "text", + "content": "Generally, the simplified rule-based methods determine whether a UAV has encountered spoofing attacks based on whether its trajectory follows a predictable path [178], [179], since a UAV may exhibit deviations from this path due to the false signals imposed by the spoofer. If the measured deviations exceed predefined thresholds, it indicates a potential spoofing attack. However, relying on predefined thresholds for detecting deviations may not dynamically adapt to the spoofing attacks. 
The study in [92] proposes a defense mechanism based on cooperative localization, where each UAV uses the relative distances and real locations of neighboring UAVs to detect spoofing attacks. Specifically, each UAV measures its relative distances based on alternative signal sources of neighboring UAVs and compares these results with its own GPS-derived location. If inconsistencies are detected (e.g., the GPS-derived location does not match the majority of the calculated locations), the UAV identifies itself or a neighboring UAV as being under attack. To optimize defense deployment, an equilibrium of a dynamic Stackelberg game is derived between the drone operator (leader) and the spoofer (follower). Simulation results show that the proposed scheme can effectively prevent spoofer's capture, while random/deterministic baselines suffer from attackers capturing one to two UAVs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": "Recent ML-based methods for spoofing defense primarily focus on binary classification concerning normal GPS signals and spoofing signals [180], [181]. However, they fail to recognize specific types of spoofing attack necessary for countermeasures in complex environments. Hence, there is an urgent need to recognize diverse GPS spoofing attack patterns for effective countermeasures for the LAENet. The authors in [93] propose an improved AdaBoost-CNN algorithm to address the challenge of recognizing diverse GPS spoofing attack patterns for UAVs, as shown in part A of Fig. 9. Three categorized spoofing attack patterns are considered including static and dynamic spoofing based on the UAV's motion state, power-matched and overpowered spoofing based on signal power, and position and time spoofing based on the spoofing targets. 
The authors select key GPS spoofing signal features such as signal quality monitoring, carrier-to-noise ratio, Doppler shift, and clock error to train the classification model. The improved AdaBoost-CNN algorithm integrates multiple weak CNN classifiers into a strong classification model. Each CNN base classifier uses the updated network parameters from the previous CNN as initial values, enabling iterative refinement of network weights to enhance feature extraction and generalization. With 800 simulated samples, the improved AdaBoost-CNN achieves " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": " accuracy, outperforming original AdaBoost-CNN " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "(94.38\\%)" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": ", CNN " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "(74.38\\%)" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": ", DNN " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "(60.94\\%)" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": ", SVM " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "(40.63\\%)" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": ", and KNN " + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "inline_equation", + "content": "(53.13\\%)" + }, + { + "bbox": [ + 307, + 296, + 564, + 606 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 616, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 616, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 616, + 564, + 750 + ], + "type": "text", + "content": "Furthermore, integrating rule-based approaches with machine learning-based methods provides an effective and robust defense against spoofing attacks. The work in [94] leverages statistical features of path losses between UAVs and terrestrial base stations to detect a UAV's trajectory deviation due to GPS spoofing, as shown in part B of Fig. 9. The spoofing detection is formulated as a nonlinear optimization problem that aims to minimize hypothesis test errors by adjusting thresholds, statistical feature weights, and the number of base stations. To further accurately analyze path loss's statistical features for final decisions on predicting GPS spoofing probabilities," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 61, + 92, + 549, + 275 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "type": "text", + "content": "TABLE X SUMMARY OF ANOMALY DETECTION FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 92, + 549, + 275 + ], + "lines": [ + { + "bbox": [ + 61, + 92, + 549, + 275 + ], + "spans": [ + { + "bbox": [ + 61, + 92, + 549, + 275 + ], + "type": "table", + "html": "
Anomaly typeReferenceAlgorithmPros & Cons
Jamming[98]HDBNSA module based on HDBN for detecting jamming anomalies\n✓ UAccurately characterize and detect jamming anomalies via KLD/DB metrics\n✗ Unstable initialization in unsupervised learning affects the performance of the HDBN
[99]GDBNGDBN to model the radio environment and detect and classify jamming anomalies\n✓ Unsupervised learning eliminates dependency on labeled data in classification of anomalies\n✗ Computational complexity increases with the number of jamming categories
[100]Active-GDBNActive-GDBN used to model UAV-jammer interactions for anomaly detection\n✓ Actively incorporate UAV's actions for faster adaptation and jamming detection\n✗ M-MJPF requires significant computational resources
[101]Blind channel estimation & ACSBlind channel estimation based on ACS properties to detect jammer signals\n✓ Does not rely on prior knowledge of the jammer's behavior\n✗ Assumes a specific structure of the multicarrier modulation format
Abnormal Power[102]Spectrum surveillanceLocal and cooperative detection of abnormal power emission\n✓ Handle both aggressive and passive power misuse\n✓ Cloud-based framework enables real-time closed-loop surveillance\n✗ Computational complexity increases with the number of SN
Eavesdropping[103]SVM & K-meansOne-class SVM and K-means clustering for detecting eavesdropping anomalies\n✓ One-class SVM and K-means are stable under varying eavesdropper power\n✗ Detection performance mainly depends on the quality and quantity of the ATD
", + "image_path": "b18000f7aeb2c577b647d747d82e6d530210f92f8540c1ae3fe4804b6814cb6a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "spans": [ + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "text", + "content": "multilayer perceptron (MLP) neural networks are deployed on edge cloud servers, where individual MLP models at each BS are used to analyze statistical features of path losses. Simulation results show that the proposed method achieves " + }, + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "inline_equation", + "content": "97\\%" + }, + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "text", + "content": " accuracy with two base stations and " + }, + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "inline_equation", + "content": "83\\%" + }, + { + "bbox": [ + 45, + 296, + 301, + 392 + ], + "type": "text", + "content": " accuracy with a single base station, outperforming baseline approaches such as adaptive trustable residence area (ATRA), which necessitates three base stations for triangulation [182]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 392, + 301, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 392, + 301, + 608 + ], + "spans": [ + { + "bbox": [ + 45, + 392, + 301, + 608 + ], + "type": "text", + "content": "Lesson Learned. For identity spoofing in the LAENet, leveraging signal features such as received signal strength and channel randomness in PLA design is an effective approach [77], [90], [91]. On the other hand, employing rule-based or ML-based techniques can detect and mitigate GNSS signal spoofing [92]–[94]. While ML-based methods show promising performance, they are limited by factors such as computational complexity and dependency on large datasets. 
Rule-based methods are simpler but may struggle in dynamic or uncertain environments. Future research could explore the application of RL to develop adaptive and robust spoofing defense mechanisms in the LAENet, which has not yet been extensively studied. Different from the abovementioned approaches, RL dynamically learns from interactions with the environment, and its sequential decision-making ability enables UAVs and ground stations to optimize spoofing defense strategies based on continuous feedback [147], make it a promising direction for enhancing spoofing defense in the LAENet" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 621, + 276, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 621, + 276, + 633 + ], + "spans": [ + { + "bbox": [ + 70, + 621, + 276, + 633 + ], + "type": "text", + "content": "V. COMMUNICATION INTEGRITY FOR LAENET" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 637, + 142, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 637, + 142, + 649 + ], + "spans": [ + { + "bbox": [ + 45, + 637, + 142, + 649 + ], + "type": "text", + "content": "A. Anomaly Detection" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "content": "Due to the open nature of wireless channels and the dominant LoS links in the LAENet, communication becomes particularly vulnerable to a diverse range of anomalous behaviors such as abnormal jamming, abnormal transmission power, and covert eavesdropping [46], [49]. Specifically, malicious jammers sense spectrum activity and dynamically adapt their interference patterns to mislead the UAV into taking suboptimal or harmful actions [81], [95]. 
In parallel, abnormal" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 296, + 564, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 296, + 564, + 452 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 564, + 452 + ], + "type": "text", + "content": "power emissions, either due to device faults, selfish behavior, or malicious intent, can violate spectrum policies, introduce harmful interference, and disrupt cooperative spectrum sharing [96]. Additionally, the pervasive risk of eavesdropping is that adversaries exploit the UAV's uplink or downlink transmissions to intercept sensitive data [61], [67]. Thus, it is essential to detect and mitigate these abnormal activities in the LAENet. Different from previously reviewed approaches such as anti-eavesdropping (Section III-A) and anti-jamming (Section IV-A), anomaly detection is a method used to identify and mitigate unexpected deviations from or irregularities in normal operational patterns by monitoring communication channels in the LAENet [127], [183]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 461, + 564, + 750 + ], + "type": "text", + "content": "Jamming anomalies generally aim to disrupt the normal operation of UAV communication links, such as by injecting disruptive signals to interfere with the legitimate communication process. The study in [98] proposes a novel Self-Awareness (SA) module to leverage the radio to detect abnormal behaviors caused by jamming attacks for Cognitive UAV communications. 
The SA module unsupervisedly learns a generative model using a Hierarchical Dynamic Bayesian Network (HDBN) [184] to represent the joint distribution of random variables characterizing the radio environment at different levels of abstraction and across time, where the Modified Bayesian Filtering [185] is used to integrate multilevel abnormality measurements for online predictions of radio environmental states at different levels. Since jamming can disrupt and shift the distributions of the radio environment, the abnormalities can be detected by calculating the Kullback-Leibler Divergence (KLD) and Dhattacharyya distance (DB) [186] between predictive messages and diagnostic messages. The predictive messages are generated by the HDBN to capture the expected patterns of normal signals, and diagnostic messages reflect the actual state of the signal. The jammer's impact is characterized by calculating generalized errors based on shifts in amplitude, phase, and frequency of signals, allowing the radio to predict future activities of the jammer. The SA" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 553, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 553, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 553, + 24, + 563, + 32 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 51, + 72, + 310, + 167 + ], + "blocks": [ + { + "bbox": [ + 51, + 57, + 148, + 65 + ], + "lines": [ + { + "bbox": [ + 51, + 57, + 148, + 65 + ], + "spans": [ + { + "bbox": [ + 51, + 57, + 148, + 65 + ], + "type": "text", + "content": "Part A. 
HDBN-based Scheme" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 51, + 72, + 310, + 167 + ], + "lines": [ + { + "bbox": [ + 51, + 72, + 310, + 167 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 310, + 167 + ], + "type": "image", + "image_path": "77a3923ca021fd3efb360f7086344f55ac8a6517a1d3d497b159cd347884c6bc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 186, + 309, + 234 + ], + "blocks": [ + { + "bbox": [ + 53, + 175, + 148, + 183 + ], + "lines": [ + { + "bbox": [ + 53, + 175, + 148, + 183 + ], + "spans": [ + { + "bbox": [ + 53, + 175, + 148, + 183 + ], + "type": "text", + "content": "Part B. GDBN-based Scheme" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 53, + 186, + 309, + 234 + ], + "lines": [ + { + "bbox": [ + 53, + 186, + 309, + 234 + ], + "spans": [ + { + "bbox": [ + 53, + 186, + 309, + 234 + ], + "type": "image", + "image_path": "90cd29842c633091f87cc0f88b31ff42f3aa3d723ca41aa33e5bffc52b6ea94e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 322, + 74, + 555, + 236 + ], + "blocks": [ + { + "bbox": [ + 322, + 58, + 439, + 66 + ], + "lines": [ + { + "bbox": [ + 322, + 58, + 439, + 66 + ], + "spans": [ + { + "bbox": [ + 322, + 58, + 439, + 66 + ], + "type": "text", + "content": "Part C. 
Active-GDBN-based Scheme" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 322, + 74, + 555, + 236 + ], + "lines": [ + { + "bbox": [ + 322, + 74, + 555, + 236 + ], + "spans": [ + { + "bbox": [ + 322, + 74, + 555, + 236 + ], + "type": "image", + "image_path": "57804c0b57ed072e72c64d0c713f6844a0ec91912cdd542854e4d53830070bae.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 251, + 564, + 306 + ], + "lines": [ + { + "bbox": [ + 45, + 251, + 564, + 306 + ], + "spans": [ + { + "bbox": [ + 45, + 251, + 564, + 306 + ], + "type": "text", + "content": "Fig. 10. The overall architecture illustrates jamming anomaly detection to enhance communication integrity in the LAEnet. Part A presents an HDBN-based scheme focusing on hierarchical dynamic models to predict and detect abnormal signals caused by jammers. It details the transition probabilities between model states and the prediction of continuous states based on discrete superstates. Part B introduces a GDBN-based scheme, extending the HDBN approach by incorporating generalized states and observations, allowing for more nuanced predictions and error calculations. Part C depicts an Active-GDBN-based scheme, integrating UAV actions into the model to actively infer and adapt to the environment, thereby optimizing resource allocation and anti-jamming measures." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "text", + "content": "module achieves a near " + }, + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "text", + "content": " abnormality detection accuracy, approximately " + }, + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 45, + 326, + 301, + 361 + ], + "type": "text", + "content": " higher than the traditional energy detector-based scheme." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "spans": [ + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": "Different from the previous work [98], which introduced the SA module using HDBN for anomaly detection, the authors in [99] propose a Generalized Dynamic Bayesian Network (GDBN)-based framework to enhance the SA module by further classifying the detected anomalies caused by multiple jammers. A generalized state-space model [184] is used to represent the evolving radio environment as a GDBN model learned in an unsupervised manner. Different from the KLD/DB metric in [1], Kullback-Leibler divergence and Bhattacharyya distance are used as abnormality measurements between predicted and observed signals to detect jamming. Once an abnormality indicative of jamming is detected, the UAV extracts the interfering signal and compares it with prelearned GDBN models (each representing a different jamming modulation scheme). 
By evaluating which pre-learned model best explains the extracted jamming signal, the UAV can not only detect the presence of a jammer but also classify its modulation type. Simulation results show that the GDBN-based method achieves an overall classification accuracy of " + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "inline_equation", + "content": "\\mathrm{SNR} = 10" + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": " dB, outperforming LSTM " + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "inline_equation", + "content": "(88\\%)" + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": ", CNN " + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "inline_equation", + "content": "(67\\%)" + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": ", and SAE " + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "inline_equation", + "content": "(90\\%)" + }, + { + "bbox": [ + 46, + 363, + 301, + 614 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 301, + 749 + ], + "type": "text", + "content": "Based on the study [99], the authors in [100] propose an Active-GDBN to model the dynamic interaction between the UAV and jammer for anomaly detection. Similar to [99], the generalized state-space model [184] is used to capture the features and dynamic evolution of UAV signals to represent the radio environment. 
Differently from passive detection and classification of jamming signals in [99], the Active-GDBN achieves active anomaly detection by incorporating the UAV's actions into the inference process. Specifically, the UAV employs a Modified Markov Jump Particle Filter (M-MJPF) [187] to link the UAV's actions to environmental" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "text", + "content": "states and observations. Meanwhile, the UAV dynamically adjusts physical resource block selections to evade jamming by encoding jammer behavior and updating beliefs. The Active-GDBN achieves about " + }, + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "inline_equation", + "content": "37.5\\%" + }, + { + "bbox": [ + 307, + 325, + 563, + 398 + ], + "type": "text", + "content": " faster convergence on anomaly detection probability than the Q-learning-based baseline under various jamming types." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 399, + 564, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 564, + 674 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 564, + 674 + ], + "type": "text", + "content": "Different from previous works [98]–[100] that detect jamming anomalies based on the statistical distribution divergence of the signal, study [101] focuses on detecting anomalies by exploiting the time delays, shifts, and modulation of the signal characteristics. 
Firstly, achieving blind channel estimation involves constructing cyclic correlation matrices to identify distinct Doppler shifts and time delays associated with transmissions by exploiting the inherent almost-cyclostationary (ACS) properties of UAV and jammer signals (e.g., periodic statistics from OFDM modulation). Then, this blind estimation process is combined with a widely linear minimum mean square error (WL-MMSE) filter to provide an initial estimate of the symbol vector by leveraging the non-circular statistics of the received signal, where the initial estimate includes contributions from both the UAV and the jammer. Finally, a post-sorting algorithm (PSA) is employed to iteratively decode and cancel the jammer's contribution by ranking and removing symbols with the highest signal-to-disturbance-plus-noise ratio (SDNR). Simulation results demonstrate that the proposed scheme can effectively detect and separate the jamming signals from UAV signals without requiring prior knowledge of the jammer's characteristics, even when the jammer's power is twice as strong as the UAV's power." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 677, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 564, + 748 + ], + "type": "text", + "content": "In addition to jamming anomalies, which cause interference and security threats in the LAENet, abnormal power emissions in UAV communication networks also represent a critical type of anomaly, potentially leading to severe disruption of communication quality and violation of spectrum policies. 
The work in [102] proposes a cloud-based surveillance framework" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 92, + 552, + 239 + ], + "blocks": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "lines": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "spans": [ + { + "bbox": [ + 50, + 56, + 559, + 84 + ], + "type": "text", + "content": "TABLE XI SUMMARY OF INJECTION DEFENSE FOR COMMUNICATION INTEGRITY CIRCLES DESCRIBE THE METHODS; CORRECT MARKERS AND CROSS MARKERS REPRESENT PROS AND CONS RESPECTIVELY." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 59, + 92, + 552, + 239 + ], + "lines": [ + { + "bbox": [ + 59, + 92, + 552, + 239 + ], + "spans": [ + { + "bbox": [ + 59, + 92, + 552, + 239 + ], + "type": "table", + "html": "
Injection typeReferenceAlgorithmPros & Cons
Jamming signal[98]HDBNHDBN-based jamming signal extraction and suppression\n✓ Autonomous learning from raw I/Q data enables adaptability to dynamic jamming patterns\nX Assume the jammer's output power remains constant during attacks
[101]SICSIC with blind channel estimation for detecting and eliminating jamming signals\n✓ Eliminate jamming signals regardless of the mobility patterns of jammers\nX Rely on sufficient cyclostationary features in the received signal
[104]DBFDBF algorithm for nullifying jamming signals\n✓ Effective suppression of jamming signals while maintaining carrier phase integrity\nX May be limited to specific GNSS frequency bands
Spoofing signal[105], [106]API & LSRSIC combined with API and LSR to recover legitimate signals from spoofing attacks\n✓ SemperFi with a single antenna does not require additional hardware\nX Limited to attackers with a power advantage lower than 15 dB
[107]Subspace projectionSubspace projection for nullifying spoofing signals\n✓ Low parameter dependency, requiring only code delays and carrier frequencies\nX Suppression performance declines if spoofing and legitimate signals have similar code delays
", + "image_path": "aefb9e00909efa991b8f89e18e40ebc9e14f812ef0f371b6668cba7d6f813151.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "spans": [ + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": "to address the detection of abnormal power emissions, where the cloud server assigns spectrum resources to the UAVs and shares UAVs' spectrum usage information with the surveillance center. The surveillance center assigns the detection task to " + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": " surveillance nodes (SNs) for local detection of abnormal power emission, where the detection rule is based on the Lagrange multiplier method and the generalized likelihood ratio test. 
After local decisions, " + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": " SNs report results to the surveillance center, where cooperative detection of abnormal power emission is performed using the decision rule that declares an abnormal event when at least " + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": " out of " + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": " nodes detect an abnormality, where the optimal global threshold of " + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 45, + 260, + 301, + 452 + ], + "type": "text", + "content": " is determined by solving the constraints on the global false-alarm probabilities. Simulation results show that the global detection probability exceeds 90% when transmit power deviation exceeds 0.02W (allocated power is 0.01W)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 456, + 301, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 456, + 301, + 696 + ], + "spans": [ + { + "bbox": [ + 45, + 456, + 301, + 696 + ], + "type": "text", + "content": "Besides the threats of jamming and abnormal power emission, another critical anomaly that requires detection is eavesdropping in the LAENet, where malicious devices covertly intercept sensitive information during UAV-to-ground and UAV-to-UAV transmissions [66], [67]. 
Note that most previous works on anti-eavesdropping focused on measuring secure performance through secrecy rate and/or secrecy outage probability (such as [71], [76]) rather than emphasizing the detection of eavesdropping attacks. The work in [103] explores anomaly detection for eavesdropping attacks in UAV-aided wireless systems using unsupervised learning. Two datasets are prepared: artificial training data (ATD), simulated without eavesdropping based on CSI (all labeled normal), and a practical dataset extracted from received signal features (mean and variance of amplitude). Two types of unsupervised learning methods are designed for anomaly detection. One-class SVM maps data to a high-dimensional space, defining a normal region where outliers are detected. K-means clustering classifies test data into two clusters, labeling the one nearest to the ATD center as normal." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "Lesson Learned For jamming anomalies, the statistical distribution divergence detection and signal structural feature-based detection, such as HDBN, GDBN, and ACS, are used to model the dynamic environment and detect deviations from" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 260, + 564, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 260, + 564, + 535 + ], + "spans": [ + { + "bbox": [ + 307, + 260, + 564, + 535 + ], + "type": "text", + "content": "learned normal patterns. For abnormal transmission power detection, a cloud-based surveillance framework supports a statistical distribution detection approach to monitor and identify power emission outliers. 
Leveraging its high computing power, the cloud enables cooperative analysis through multi-source data aggregation, dynamically optimizes detection thresholds using global information, and maintains a feedback loop for adaptive anomaly detection. For eavesdropping detection, unsupervised learning techniques, including One-Class SVM and K-means clustering, achieve the identification of anomalies in received signals. These approaches effectively achieve anomaly detection and demonstrate excellent performance. However, challenges remain, including the reliance on high-quality training data and the complexity of maintaining real-time adaptability in dynamic spectrum environments. Currently, Generative AI such as GANs and generative diffusion models presents a promising research direction for anomaly detection, as demonstrated in the use of generalized models in HDBN and the artificial data generation for training ML and clustering models in [188], [189]. Generative AI could further enrich training datasets and provide a high-level generative model to enhance anomaly detection in the dynamic and uncertain LAENet." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 309, + 562, + 398, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 562, + 398, + 574 + ], + "spans": [ + { + "bbox": [ + 309, + 562, + 398, + 574 + ], + "type": "text", + "content": "B. Injection Defense" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 580, + 564, + 748 + ], + "type": "text", + "content": "The low-altitude economy is highly dependent on open communication and network architecture with dense communication links, which brings injection attacks as a significant threat to UAV communication integrity [28], [46]. 
These attacks involve the deliberate injection of malicious signals, such as jamming and spoofing signals, to disrupt or manipulate legitimate communications [97], [190]. Jamming signal injection can make legitimate signals unrecognizable by emitting high-power electromagnetic interference to degrade signal reception [98]. Additionally, spoofing signal injection can transmit high-power signals to overshadow legitimate GNSS signals. Therefore, eliminating injection signals or separating them from legitimate signals is crucial for ensuring communication integrity in the LAENet." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 284, + 276 + ], + "blocks": [ + { + "bbox": [ + 53, + 57, + 236, + 66 + ], + "lines": [ + { + "bbox": [ + 53, + 57, + 236, + 66 + ], + "spans": [ + { + "bbox": [ + 53, + 57, + 236, + 66 + ], + "type": "text", + "content": "Part A. 
SNDR-based SIC for jamming injection defense" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 70, + 284, + 276 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 284, + 276 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 284, + 276 + ], + "type": "image", + "image_path": "0a2b881bbb436b377c3415cb335afd7cf07d689d5c423c459fd8a5651c2607af.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 302, + 77, + 558, + 187 + ], + "blocks": [ + { + "bbox": [ + 302, + 58, + 498, + 67 + ], + "lines": [ + { + "bbox": [ + 302, + 58, + 498, + 67 + ], + "spans": [ + { + "bbox": [ + 302, + 58, + 498, + 67 + ], + "type": "text", + "content": "Part B. SIC with API and LSR for spoofing injection defense" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 77, + 558, + 187 + ], + "lines": [ + { + "bbox": [ + 302, + 77, + 558, + 187 + ], + "spans": [ + { + "bbox": [ + 302, + 77, + 558, + 187 + ], + "type": "image", + "image_path": "95fa115c55a605022b9fd1a951542bb487ecec131003b7074dab8d10a903cb98.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 303, + 219, + 553, + 269 + ], + "blocks": [ + { + "bbox": [ + 301, + 200, + 495, + 210 + ], + "lines": [ + { + "bbox": [ + 301, + 200, + 495, + 210 + ], + "spans": [ + { + "bbox": [ + 301, + 200, + 495, + 210 + ], + "type": "text", + "content": "Part C. 
Subspace projection for spoofing injection defense" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 303, + 219, + 553, + 269 + ], + "lines": [ + { + "bbox": [ + 303, + 219, + 553, + 269 + ], + "spans": [ + { + "bbox": [ + 303, + 219, + 553, + 269 + ], + "type": "image", + "image_path": "74dd6b7f5134e0d980159677bb8d2dbfb39a1c546207ad08c741ba3fb68171b0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 287, + 564, + 326 + ], + "lines": [ + { + "bbox": [ + 45, + 287, + 564, + 326 + ], + "spans": [ + { + "bbox": [ + 45, + 287, + 564, + 326 + ], + "type": "text", + "content": "Fig. 11. The overall architecture of injection defense mechanisms for UAVs in smart city applications. Part A presents the SIC architecture that processes channel state information to defend against jamming injection attacks [101]. Part B shows an SIC architecture integrated with API and LSR modules, which subtracts injection signals from the received signal to recover normal signals [105], [106]. Part C depicts a subspace projection-based architecture for spoofing injection defense, where the received signal is projected onto the orthogonal null space of the spoofing signals to eliminate them [107]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 345, + 301, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 345, + 301, + 622 + ], + "spans": [ + { + "bbox": [ + 45, + 345, + 301, + 622 + ], + "type": "text", + "content": "The UAV's communication can be severely disrupted by jammers that exploit LoS propagation to inject jamming signals into the transmission channel, which may effectively mask legitimate signals and render them unrecognizable [101]. The work in [98] proposes an HDBN-based injection defense scheme to extract and remove the jammer's signal. 
This work first utilizes the HDBN to detect abnormal behaviors caused by jamming attacks, as mentioned earlier in Section V-A. Once the jammer's presence is confirmed, its signal characteristics are analyzed across multiple levels of continuous in-phase (I) and quadrature (Q) components and observation-level state vectors [191]. The extracted jammer signal is then separated from the received observation using frequency-domain subtraction [192], component-wise I/Q processing, and adaptive filtering [191]. The corrected signal is subsequently demodulated and decoded using techniques and error correction coding to restore the original signal. To maintain resilience against evolving jamming tactics, the system continuously updates the HDBN model to improve suppression commands. Simulation results show that the mean square error (MSE) of suppression commands decreases as the JSR increases, meaning that jammers attacking with higher power can be better estimated than jammers attacking with lower power." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 628, + 300, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 628, + 300, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 628, + 300, + 750 + ], + "type": "text", + "content": "Different from the work in [98], which separates the jamming signal by analyzing its I/Q characteristics, the study in [101] proposes a Serial Interference Cancellation (SIC) scheme based on SDNR to eliminate injected anomalous signals in UAV communications, as shown in part A of Fig. 11. First, blind channel estimation and a WL-MMSE filter are used to identify UAV and jammer signals (as detailed in Section V-A). Then, the PSA ranks detected symbols based on SDNR, where the jamming signals rank higher in SDNR due to their higher emitted power. 
The SIC [193]," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 345, + 564, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 345, + 564, + 477 + ], + "spans": [ + { + "bbox": [ + 307, + 345, + 564, + 477 + ], + "type": "text", + "content": "[194] is subsequently designed for progressively eliminating jamming signals. Specifically, the high-rank jamming symbol is decoded, reconstructed using estimated channel parameters, and subtracted from the received signal. The process continues iteratively to eliminate previously detected jamming signals until all UAV symbols are successfully recovered, with the receiver dynamically updating channel estimation to adapt to jammer mobility and environmental changes. Simulation results demonstrate that the UAV signal can be recovered with low bit error rates " + }, + { + "bbox": [ + 307, + 345, + 564, + 477 + ], + "type": "inline_equation", + "content": "(< 10^{-4})" + }, + { + "bbox": [ + 307, + 345, + 564, + 477 + ], + "type": "text", + "content": " even when the power of the jammer is double that of the UAV." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 480, + 564, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 480, + 564, + 732 + ], + "spans": [ + { + "bbox": [ + 307, + 480, + 564, + 732 + ], + "type": "text", + "content": "Jamming attacks not only affect U2G and UAV-to-UAV communications but also cause RF interference, leading to UAVs failing to track GNSS signals in the LAENet. In light of this, the work in [104] proposes a self-calibrating digital beamforming (DBF) algorithm to effectively nullify jamming signals while preserving high-precision carrier phase measurements. It calibrates the antenna array's steering vectors and RF channel characteristics. Once calibration is complete, the system performs jamming detection and direction estimation by analyzing interference patterns across the antenna array. 
Then, the minimum power distortionless response (MPDR) optimization rule is used to calculate optimal beamforming weights, which aim to create nulls in the beam pattern corresponding to the directions of jamming signals, effectively suppressing them. The calculated beamforming weights are applied to the received signals to produce the beamformer output, which effectively suppresses jamming signals while preserving the carrier phase integrity of the desired signals. The proposed scheme achieves up to 80 dB Jammer-to-Signal Ratio (JSR) suppression, significantly outperforming the conventional Power Inversion (PI) scheme." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "In addition to jamming signals, spoofing attacks can easily" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 55, + 299, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 55, + 299, + 341 + ], + "spans": [ + { + "bbox": [ + 48, + 55, + 299, + 341 + ], + "type": "text", + "content": "transmit fake signals to manipulate GNSS signals due to their open signal structure and weak signal strength [195]. One type of method is based on signal encryption or data encryption to prevent malicious spoofers from injecting illegitimate signals [196]–[198]. However, they may not be suitable for resource-constrained UAVs in the LAENet. 
Therefore, defending against spoofing signal injection based on different signal characteristics is a promising solution. The authors in [105], [106] propose an anti-spoofing system, called SemperFi, to autonomously recover legitimate signals during active spoofing for UAVs. The system employs two core modules: the Adversarial Peak Identifier (API) and the Legitimate Signal Retriever (LSR), as shown in part B of Fig. 11. The API detects spoofed signals by correlating inertial measurement unit (IMU) data with calculated position-velocity-time (PVT) solutions [199]. The LSR module replicates the spoofing signal once it is identified. Then, similar to the study in [101], the SIC is applied to subtract the replica from the composite received signal that contains legitimate and spoofing signals. SemperFi enters an iterative refinement process if spoofing signals persist after initial cancellation, where replication, subtraction, and reassessment are performed until the spoofing detector no longer triggers an alarm, indicating sufficient attenuation or elimination of spoofing." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 342, + 299, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 299, + 591 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 299, + 591 + ], + "type": "text", + "content": "Besides recovering legitimate signals by subtracting spoofing signals from the received signal [101], [105], [106], projecting the signal is also a viable injection defense strategy. In the study [107], the GNSS receiver's spoofing mitigation algorithm employs a subspace projection-based interference cancellation method to effectively eliminate spoofing signals, as shown in part C of Fig. 11. Specifically, the receiver on UAVs acquires and tracks incoming signals, identifying potential spoofing signals and reconstructing them based on their power levels, pseudo-random noise (PRN) code delays, and carrier frequencies. 
Then, the receiver uses these reconstructed spoofing signals to construct a spoofing subspace, which represents all possible linear combinations of spoofing signal characteristics. To effectively remove spoofing signals from the received signal, the receiver performs orthogonal projection to obtain a cleaned signal by mapping the received signal onto a complementary null space that is mathematically orthogonal to the spoofing subspace. Simulation results show that shorter projection lengths suppress spoofing signals more effectively than longer projections, achieving a " + }, + { + "bbox": [ + 48, + 342, + 299, + 591 + ], + "type": "inline_equation", + "content": "20\\mathrm{dB}" + }, + { + "bbox": [ + 48, + 342, + 299, + 591 + ], + "type": "text", + "content": " gain in Signal-to-Interference Ratio (SIR)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 593, + 299, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 593, + 299, + 747 + ], + "spans": [ + { + "bbox": [ + 48, + 593, + 299, + 747 + ], + "type": "text", + "content": "Lesson Learned The above-mentioned studies have demonstrated the effectiveness for mitigating injection signals, such as jamming and spoofing attacks, thereby enhancing UAV communication reliability and security. These advancements leverage techniques that not only detect malicious signal interference but also enable autonomous recovery. One key advantage is that non-cooperative detection techniques, such as blind estimation [101] and self-awareness models [98], allow for efficient attack identification without requiring prior knowledge of the attacker's signal characteristics to adapt to dynamic and adversarial environments. 
However, several challenges remain in that beamforming-based or spatial filtering techniques rely on multi-antenna configurations [101]," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 311, + 55, + 563, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 55, + 563, + 137 + ], + "spans": [ + { + "bbox": [ + 311, + 55, + 563, + 137 + ], + "type": "text", + "content": "[104], limiting their applicability in cost-sensitive or small UAV systems. Future work should explore lightweight and energy-efficient implementations of injection defense to support stable UAV signal integrity protection. Additionally, more intelligent injection defense strategies combining optimization methods, RL, and ML could enhance resilience against more sophisticated adversaries." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 357, + 153, + 516, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 153, + 516, + 163 + ], + "spans": [ + { + "bbox": [ + 357, + 153, + 516, + 163 + ], + "type": "text", + "content": "VI. FUTURE RESEARCH DIRECTIONS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 311, + 169, + 489, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 169, + 489, + 179 + ], + "spans": [ + { + "bbox": [ + 311, + 169, + 489, + 179 + ], + "type": "text", + "content": "A. Energy-efficient Physical Layer Security" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 311, + 184, + 563, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 184, + 563, + 348 + ], + "spans": [ + { + "bbox": [ + 311, + 184, + 563, + 348 + ], + "type": "text", + "content": "Future work can focus on exploring more unique physical characteristics of wireless communication, such as exploiting channel characteristics and implementing simple coding schemes, to develop secure and low-energy protocols. 
Meanwhile, drones in the LAENet need to develop adaptive power control strategies that dynamically adjust transmission power based on channel conditions and security requirements to minimize unnecessary energy consumption [200]. Moreover, dynamic trajectory optimization is equally important for energy efficiency [201]. Future research can explore enabling UAVs to learn attack patterns in real time, share secure trajectory models across swarms, and dynamically adjust flight paths based on real-time security and power consumption feedback." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 311, + 367, + 545, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 367, + 545, + 378 + ], + "spans": [ + { + "bbox": [ + 311, + 367, + 545, + 378 + ], + "type": "text", + "content": "B. Multi-drone Collaboration for Secure Communication" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 383, + 563, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 383, + 563, + 524 + ], + "spans": [ + { + "bbox": [ + 311, + 383, + 563, + 524 + ], + "type": "text", + "content": "Future research on secure physical layer communication in the LAENet should move beyond existing dual-UAV collaboration models and explore distributed multi-UAV coordination (or UAV swarms) for enhanced resilience against jamming, spoofing, and unauthorized access [202]. For example, UAV swarms can collaboratively emit interference signals to obscure unauthorized receivers, thereby enhancing the confidentiality of communications [20]. Additionally, the integration of adaptive trust-based mutual authentication protocols among UAVs is essential [26]. Multiple UAVs with mutually verified identities can enable dynamic and secure spectrum-sharing mechanisms to optimize resource utilization in the LAENet." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 311, + 542, + 471, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 542, + 471, + 553 + ], + "spans": [ + { + "bbox": [ + 311, + 542, + 471, + 553 + ], + "type": "text", + "content": "C. AI-driven Security Defense Strategy" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 558, + 563, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 558, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 558, + 563, + 747 + ], + "type": "text", + "content": "Existing AI-based security strategies mainly focus on training AI models to identify anomalous signals while having some limitations. The resource-constrained drones are unable to train high-quality AI models, making the integration of edge computing a promising approach for model training [200]. Note that AI models may be difficult to generalize in recognizing various anomalous signals because they are pre-trained on previously collected datasets of fixed size. Future work can explore leveraging GAN or diffusion models to generate datasets based on real-time captured anomalous signals [203]. Furthermore, emerging generative AI technologies, such as the diffusion model for secure network topology generation in low-altitude domains [189], [204], AI agents for human-aerial vehicle secure interaction [205], and mixture of experts for robust wireless communications [2], [206], can be explored to achieve a more autonomous and intelligent LAENet." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 31 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 55, + 271, + 67 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 55, + 271, + 67 + ], + "spans": [ + { + "bbox": [ + 46, + 55, + 271, + 67 + ], + "type": "text", + "content": "D. Space-Air-Ground Integrated Security Architecture" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 70, + 301, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 70, + 301, + 202 + ], + "spans": [ + { + "bbox": [ + 45, + 70, + 301, + 202 + ], + "type": "text", + "content": "Future research can explore establishing a multi-domain physical layer security framework for LAENet to connect space, air, and ground layers, providing seamless communication coverage and cost-effective network access [55], [207]. A potential key research direction is the development of a coordinated multi-tier security mechanism, where satellites, UAVs, and terrestrial base stations collaboratively enhance physical layer security through dynamic resource allocation and interference management based on real-time CSI and environmental conditions, such as UAV mobility, channel fading, and spectrum constraints." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 218, + 231, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 218, + 231, + 229 + ], + "spans": [ + { + "bbox": [ + 46, + 218, + 231, + 229 + ], + "type": "text", + "content": "E. 
6G-Enabled Secure UAV Communication" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 234, + 301, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 234, + 301, + 390 + ], + "spans": [ + { + "bbox": [ + 45, + 234, + 301, + 390 + ], + "type": "text", + "content": "The advent of 6G networks presents new opportunities for LAENet. Terahertz (THz) communication can offer ultrahigh-speed data transmission capabilities for LAENet [208]. Future research can explore the integration of THz with advanced beamforming techniques to focus signals on legitimate users, thereby enhancing security and reducing the risk of interception. Furthermore, Reconfigurable Intelligent Surfaces (RIS) play a crucial role in strengthening physical layer security by intelligently controlling wireless signal propagation [209], [210]. Future work can investigate RIS-based secure beamforming strategies to mitigate adversary interception, and leverage optimization techniques and DRL to adaptively adjust beamforming against eavesdropping or jamming attacks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 403, + 215, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 215, + 415 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 215, + 415 + ], + "type": "text", + "content": "VII. CONCLUSION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 419, + 302, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 419, + 302, + 598 + ], + "spans": [ + { + "bbox": [ + 45, + 419, + 302, + 598 + ], + "type": "text", + "content": "This paper has presented a comprehensive survey on secure physical layer communications in the LAENet, emphasizing the importance of safeguarding confidentiality, availability, and integrity in communications. It introduced the concept and architecture of the LAENet and outlined the associated security issues in physical layer communication. 
Then, the survey provided in-depth reviews of countermeasures for anti-eavesdropping strategies, authentication schemes, anti-jamming strategies, spoofing defenses, anomaly detection, and injection defense. Finally, the paper proposed a set of forward-looking future research directions. These discussions highlighted the critical role of secure physical layer communication in supporting the development of the LAENet and offered valuable insights for ongoing advancements in this emerging domain." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 144, + 613, + 203, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 613, + 203, + 624 + ], + "spans": [ + { + "bbox": [ + 144, + 613, + 203, + 624 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 54, + 629, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 54, + 629, + 299, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 629, + 299, + 666 + ], + "spans": [ + { + "bbox": [ + 54, + 629, + 299, + 666 + ], + "type": "text", + "content": "[1] Z. Li, Z. Gao, K. Wang, Y. Mei, C. Zhu, L. Chen, X. Wu, and D. Niyato, \"Unauthorized uav countermeasure for low-altitude economy: Joint communications and jamming based on mimo cellular systems,\" IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6659-6672, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 666, + 299, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 666, + 299, + 701 + ], + "spans": [ + { + "bbox": [ + 55, + 666, + 299, + 701 + ], + "type": "text", + "content": "[2] C. Zhao, J. Wang, R. Zhang, D. Niyato, G. Sun, H. Du, D. I. Kim, and A. Jamalipour, \"Generative ai-enabled wireless communications for robust low-altitude economy networking,\" arXiv preprint arXiv:2502.18118, 2025." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 702, + 301, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 702, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 55, + 702, + 301, + 748 + ], + "type": "text", + "content": "[3] H. A. H. Alobaidy, R. Nordin, M. J. Singh, N. F. Abdullah, A. Haniz, K. Ishizu, T. Matsumura, F. Kojima, and N. Ramli, \"Low-altitude-platform-based airborne IoT network (lap-ain) for water quality monitoring in harsh tropical environment,\" IEEE Internet of Things Journal, vol. 9, no. 20, pp. 20034-20054, 2022." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 314, + 56, + 564, + 748 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 318, + 56, + 564, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 56, + 564, + 83 + ], + "spans": [ + { + "bbox": [ + 318, + 56, + 564, + 83 + ], + "type": "text", + "content": "[4] China holds central economic work conference to plan for 2024. Accessed: Dec. 12, 2023. [Online]. Available: https://english.www.gov.cn/news/202312/12/content_WS657860aecd0868f4e8e21c2.html" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 84, + 564, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 84, + 564, + 119 + ], + "spans": [ + { + "bbox": [ + 318, + 84, + 564, + 119 + ], + "type": "text", + "content": "[5] J. Qiu, D. Grace, G. Ding, M. D. Zakaria, and Q. Wu, \"Air-ground heterogeneous networks for 5g and beyond via integrating high and low altitude platforms,\" IEEE Wireless Communications, vol. 26, no. 6, pp. 140-148, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 120, + 564, + 154 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 120, + 564, + 154 + ], + "spans": [ + { + "bbox": [ + 318, + 120, + 564, + 154 + ], + "type": "text", + "content": "[6] H. Ahmadinejad and A. 
Falahati, \"Forming a two-tier heterogeneous air-network via combination of high and low altitude platforms,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 2, pp. 1989-2001, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 155, + 563, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 155, + 563, + 191 + ], + "spans": [ + { + "bbox": [ + 318, + 155, + 563, + 191 + ], + "type": "text", + "content": "[7] N. Hossein Motlagh, T. Taleb, and O. Arouk, \"Low-altitude unmanned aerial vehicles-based internet of things services: Comprehensive survey and future perspectives,\" IEEE Internet of Things Journal, vol. 3, no. 6, pp. 899-922, 2016." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 192, + 563, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 192, + 563, + 227 + ], + "spans": [ + { + "bbox": [ + 318, + 192, + 563, + 227 + ], + "type": "text", + "content": "[8] H. Yang, M. Zheng, Z. Shao, Y. Jiang, and Z. Xiong, \"Intelligent computation offloading and trajectory planning for 3d target search in low-altitude economy scenarios,\" IEEE Wireless Communications Letters, pp. 1-1, 2025." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 228, + 563, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 228, + 563, + 271 + ], + "spans": [ + { + "bbox": [ + 318, + 228, + 563, + 271 + ], + "type": "text", + "content": "[9] R. Shakeri, M. A. Al-Garadi, A. Badawy, A. Mohamed, T. Khattab, A. K. Al-Ali, K. A. Harras, and M. Guizani, \"Design challenges of multi-uav systems in cyber-physical applications: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3340-3385, 2019." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 272, + 563, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 272, + 563, + 308 + ], + "spans": [ + { + "bbox": [ + 315, + 272, + 563, + 308 + ], + "type": "text", + "content": "[10] Y. Zhang, X. Gao, N. Ye, D. Niyato, Z. Han, and K. Yang, \"Joint uav deployment, power allocation, and coalition formation for physical layer security in heterogeneous networks,\" IEEE Transactions on Vehicular Technology, pp. 1-15, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 308, + 563, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 308, + 563, + 335 + ], + "spans": [ + { + "bbox": [ + 314, + 308, + 563, + 335 + ], + "type": "text", + "content": "[11] Z. Liu, Y. Cao, P. Gao, X. Hua, D. Zhang, and T. Jiang, \"Multi-uav network assisted intelligent edge computing: Challenges and opportunities,\" China Communications, vol. 19, no. 3, pp. 258-278, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 335, + 563, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 335, + 563, + 371 + ], + "spans": [ + { + "bbox": [ + 314, + 335, + 563, + 371 + ], + "type": "text", + "content": "[12] Y. Liu, X. Gong, J. Chen, S. Chen, and Y. Yang, \"Rotation-invariant siamese network for low-altitude remote-sensing image registration,\" IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 13, pp. 5746-5758, 2020." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 371, + 563, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 371, + 563, + 398 + ], + "spans": [ + { + "bbox": [ + 314, + 371, + 563, + 398 + ], + "type": "text", + "content": "[13] G. Cheng, X. Song, Z. Lyu, and J. Xu, \"Networked isac for low-altitude economy: Coordinated transmit beamforming and UAV trajectory design,\" IEEE Transactions on Communications, pp. 
1-1, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 398, + 563, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 398, + 563, + 434 + ], + "spans": [ + { + "bbox": [ + 314, + 398, + 563, + 434 + ], + "type": "text", + "content": "[14] G. Cheng, X. Song, Z. Lyu, and J. Xu, “Networked isac for low-altitude economy: Transmit beamforming and uav trajectory design,” in 2024 IEEE/CIC International Conference on Communications in China (ICCC), 2024, pp. 78-83." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 434, + 563, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 434, + 563, + 469 + ], + "spans": [ + { + "bbox": [ + 314, + 434, + 563, + 469 + ], + "type": "text", + "content": "[15] X. Zheng, G. Sun, J. Li, J. Wang, Q. Wu, D. Niyato, and A. Jamalipour, \"Uav swarm-enabled collaborative post-disaster communications in low altitude economy via a two-stage optimization approach,\" arXiv preprint arXiv:2501.05742, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 470, + 563, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 470, + 563, + 496 + ], + "spans": [ + { + "bbox": [ + 314, + 470, + 563, + 496 + ], + "type": "text", + "content": "[16] China's low-altitude economy soars at high speed. Accessed: Dec. 19, 2024. [Online]. Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 497, + 563, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 497, + 563, + 532 + ], + "spans": [ + { + "bbox": [ + 314, + 497, + 563, + 532 + ], + "type": "text", + "content": "[17] China's low-altitude economy takes flight: A new engine for innovation-driven growth. Accessed: Mar. 17, 2025. [Online]. 
Available: https://www.chinadaily.com.cn/a/202412/19/WS6763b8b7a310f1265a1d3d24.html" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 533, + 563, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 533, + 563, + 568 + ], + "spans": [ + { + "bbox": [ + 314, + 533, + 563, + 568 + ], + "type": "text", + "content": "[18] Flying air taxis move closer to us takeoff with issuing of FAA rule. Accessed: Oct. 22, 2024. [Online]. Available: https://www.usnews.com/news/business/articles/2024-10-22/flying-air-taxis-move-closer-to-us-takeoff-with-issuing-of-faa-rule" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 568, + 563, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 568, + 563, + 603 + ], + "spans": [ + { + "bbox": [ + 314, + 568, + 563, + 603 + ], + "type": "text", + "content": "[19] A. Rugo, C. A. Ardagna, and N. E. Ioini, “A security review in the uavnet era: Threats, countermeasures, and gap analysis,” ACM Comput. Surv., vol. 55, no. 1, Jan. 2022. [Online]. Available: https://doi.org/10.1145/3485272" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 604, + 563, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 604, + 563, + 639 + ], + "spans": [ + { + "bbox": [ + 314, + 604, + 563, + 639 + ], + "type": "text", + "content": "[20] X. Wang, Z. Zhao, L. Yi, Z. Ning, L. Guo, F. R. Yu, and S. Guo, \"A survey on security of uav swarm networks: Attacks and countermeasures,\" ACM Comput. Surv., vol. 57, no. 3, Nov. 2024. [Online]. Available: https://doi.org/10.1145/3703625" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 640, + 563, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 640, + 563, + 666 + ], + "spans": [ + { + "bbox": [ + 314, + 640, + 563, + 666 + ], + "type": "text", + "content": "[21] O. Ceviz, S. Sen, and P. 
Sadioglu, “A survey of security in uavs and fanets: issues, threats, analysis of attacks, and solutions,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 667, + 563, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 667, + 563, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 667, + 563, + 712 + ], + "type": "text", + "content": "[22] H. J. Hadi, Y. Cao, K. U. Nisa, A. M. Jamil, and Q. Ni, \"A comprehensive survey on security, privacy issues and emerging defence technologies for uavs,\" Journal of Network and Computer Applications, vol. 213, p. 103607, 2023. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1084804523000267" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "type": "text", + "content": "[23] V. Hassija, V. Chamola, A. Agrawal, A. Goyal, N. C. Luong, D. Niyato, F. R. Yu, and M. Guizani, \"Fast, reliable, and secure drone communication: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 4, pp. 2802-2832, 2021." 
+ } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "spans": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "type": "text", + "content": "[24] B. Zolfaghari, M. Abbasmollaei, F. Hajizadeh, N. Yanai, and K. Bibak, \"Secure uav (drone) and the great promise of ai,\" ACM Comput. Surv., vol. 56, no. 11, Jul. 2024. [Online]. Available: https://doi.org/10.1145/3673225" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 93, + 301, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 93, + 301, + 120 + ], + "spans": [ + { + "bbox": [ + 50, + 93, + 301, + 120 + ], + "type": "text", + "content": "[25] X. Wei, J. Ma, and C. Sun, “A survey on security of unmanned aerial vehicle systems: Attacks and countermeasures,” IEEE Internet of Things Journal, vol. 11, no. 21, pp. 34826-34847, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 121, + 301, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 121, + 301, + 166 + ], + "spans": [ + { + "bbox": [ + 51, + 121, + 301, + 166 + ], + "type": "text", + "content": "[26] M. Adil, M. A. Jan, Y. Liu, H. Abulkasim, A. Farouk, and H. 
Song, \"A systematic survey: Security threats to UAV-aided IoT applications, taxonomy, current challenges and requirements with future research directions,\" IEEE Transactions on Intelligent Transportation Systems, vol. 24, no. 2, pp. 1437-1455, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 166, + 301, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 166, + 301, + 202 + ], + "spans": [ + { + "bbox": [ + 51, + 166, + 301, + 202 + ], + "type": "text", + "content": "[27] N. Kumar and A. Chaudhary, \"Surveying cybersecurity vulnerabilities and countermeasures for enhancing uav security,\" Computer Networks, vol. 252, p. 110695, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128624005279" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 202, + 301, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 202, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 51, + 202, + 301, + 238 + ], + "type": "text", + "content": "[28] J. Wang, X. Wang, R. Gao, C. Lei, W. Feng, N. Ge, S. Jin, and T. Q. S. Quek, “Physical layer security for uav communications: A comprehensive survey,” China Communications, vol. 19, no. 9, pp. 77–115, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 238, + 301, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 238, + 301, + 283 + ], + "spans": [ + { + "bbox": [ + 51, + 238, + 301, + 283 + ], + "type": "text", + "content": "[29] A. Fotouhi, H. Qiang, M. Ding, M. Hassan, L. G. Giordano, A. Garcia-Rodriguez, and J. Yuan, \"Survey on uav cellular communications: Practical aspects, standardization advancements, regulation, and security challenges,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3417-3442, 2019." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 284, + 301, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 284, + 301, + 320 + ], + "spans": [ + { + "bbox": [ + 51, + 284, + 301, + 320 + ], + "type": "text", + "content": "[30] M. Adil, H. Song, S. Mastorakis, H. Abulkasim, A. Farouk, and Z. Jin, \"Uav-assisted IoT applications, cybersecurity threats, ai-enabled solutions, open challenges with future research directions,\" IEEE Transactions on Intelligent Vehicles, vol. 9, no. 4, pp. 4583-4605, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 320, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 320, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 51, + 320, + 301, + 365 + ], + "type": "text", + "content": "[31] W. U. Khan, E. Lagunas, Z. Ali, M. A. Javed, M. Ahmed, S. Chatzinotas, B. Ottersten, and P. Popovski, \"Opportunities for physical layer security in uav communication enhanced with intelligent reflective surfaces,\" IEEE Wireless Communications, vol. 29, no. 6, pp. 22-28, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 365, + 301, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 365, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 51, + 365, + 301, + 402 + ], + "type": "text", + "content": "[32] J. Wang, H. Du, D. Niyato, M. Zhou, J. Kang, and H. Vincent Poor, \"Acceleration estimation of signal propagation path length changes for wireless sensing,\" IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11476-11492, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 402, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 402, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 51, + 402, + 301, + 430 + ], + "type": "text", + "content": "[33] T. Wang, C.-K. Wen, H. Wang, F. Gao, T. Jiang, and S. 
Jin, \"Deep learning for wireless physical layer: Opportunities and challenges,\" China Communications, vol. 14, no. 11, pp. 92-111, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 430, + 301, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 430, + 301, + 465 + ], + "spans": [ + { + "bbox": [ + 51, + 430, + 301, + 465 + ], + "type": "text", + "content": "[34] J. Wang, H. Du, D. Niyato, J. Kang, S. Cui, X. Shen, and P. Zhang, \"Generative ai for integrated sensing and communication: Insights from the physical layer perspective,\" IEEE Wireless Communications, vol. 31, no. 5, pp. 246-255, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 466, + 301, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 466, + 301, + 501 + ], + "spans": [ + { + "bbox": [ + 51, + 466, + 301, + 501 + ], + "type": "text", + "content": "[35] S. Li, L. Xiao, Y. Liu, G. Liu, P. Xiao, and T. Jiang, \"Performance analysis for orthogonal time frequency space modulation systems with generalized waveform,\" China Communications, vol. 20, no. 4, pp. 57-72, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 502, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 502, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 51, + 502, + 301, + 538 + ], + "type": "text", + "content": "[36] N. Xie, W. Xiong, M. Sha, T. Hu, P. Zhang, L. Huang, and D. Niyato, \"Physical layer authentication with high compatibility using an encoding approach,\" IEEE Transactions on Communications, vol. 70, no. 12, pp. 8270-8285, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 538, + 301, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 538, + 301, + 583 + ], + "spans": [ + { + "bbox": [ + 51, + 538, + 301, + 583 + ], + "type": "text", + "content": "[37] S. Liu, T. Wang, and S. 
Wang, \"Toward intelligent wireless communications: Deep learning - based physical layer technologies,\" Digital Communications and Networks, vol. 7, no. 4, pp. 589-597, 2021. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2352864821000742" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 584, + 301, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 584, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 51, + 584, + 301, + 620 + ], + "type": "text", + "content": "[38] Y. Zhang, Y. Peng, X. Tang, L. Xiao, and T. Jiang, \"Large-scale fading decoding aided user-centric cell-free massive mimo: Uplink error probability analysis and detector design,\" IEEE Transactions on Wireless Communications, vol. 23, no. 8, pp. 10336-10349, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 620, + 301, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 620, + 301, + 657 + ], + "spans": [ + { + "bbox": [ + 51, + 620, + 301, + 657 + ], + "type": "text", + "content": "[39] H. Du, J. Wang, D. Niyato, J. Kang, Z. Xiong, J. Zhang, and X. Shen, \"Semantic communications for wireless sensing: Ris-aided encoding and self-supervised decoding,\" IEEE Journal on Selected Areas in Communications, vol. 41, no. 8, pp. 2547-2562, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 51, + 657, + 301, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 657, + 301, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 657, + 301, + 693 + ], + "type": "text", + "content": "[40] P. Yang, X. Xi, K. Guo, T. Q. S. Quek, J. Chen, and X. Cao, \"Proactive uav network slicing for urllc and mobile broadband service multiplexing,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3225-3244, 2021." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 693, + 301, + 728 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 301, + 728 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 301, + 728 + ], + "type": "text", + "content": "[41] J. Huang, A. Wang, G. Sun, J. Li, J. Wang, H. Du, and D. Niyato, \"Dual uav cluster-assisted maritime physical layer secure communications via collaborative beamforming,\" IEEE Internet of Things Journal, pp. 1-1, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 51, + 729, + 301, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 729, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 51, + 729, + 301, + 748 + ], + "type": "text", + "content": "[42] Z. Duan, Z. Chang, N. Xie, W. Sun, and D. T. Niyato, \"Adaptive strategies in enhancing physical layer security: A comprehensive" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 314, + 57, + 563, + 748 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 331, + 57, + 563, + 76 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 57, + 563, + 76 + ], + "spans": [ + { + "bbox": [ + 331, + 57, + 563, + 76 + ], + "type": "text", + "content": "survey,\" ACM Comput. Surv., vol. 57, no. 7, Feb. 2025. [Online]. Available: https://doi.org/10.1145/3715319" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 76, + 563, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 76, + 563, + 102 + ], + "spans": [ + { + "bbox": [ + 315, + 76, + 563, + 102 + ], + "type": "text", + "content": "[43] Q. Wang, Z. Chen, W. Mei, and J. Fang, \"Improving physical layer security using uav-enabled mobile relaying,\" IEEE Wireless Communications Letters, vol. 6, no. 3, pp. 310-313, 2017." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 102, + 563, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 102, + 563, + 146 + ], + "spans": [ + { + "bbox": [ + 314, + 102, + 563, + 146 + ], + "type": "text", + "content": "[44] S. Liu, H. Yang, M. Zheng, L. Xiao, Z. Xiong, and D. Niyato, “Uav-enabled semantic communication in mobile edge computing under jamming attacks: An intelligent resource management approach,” IEEE Transactions on Wireless Communications, vol. 23, no. 11, pp. 17 493–17 507, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 146, + 563, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 146, + 563, + 182 + ], + "spans": [ + { + "bbox": [ + 314, + 146, + 563, + 182 + ], + "type": "text", + "content": "[45] S. Bi, K. Li, S. Hu, W. Ni, C. Wang, and X. Wang, “Detection and mitigation of position spoofing attacks on cooperative uav swarm formations,” IEEE Transactions on Information Forensics and Security, vol. 19, pp. 1883–1895, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 182, + 563, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 182, + 563, + 209 + ], + "spans": [ + { + "bbox": [ + 314, + 182, + 563, + 209 + ], + "type": "text", + "content": "[46] X. Sun, D. W. K. Ng, Z. Ding, Y. Xu, and Z. Zhong, \"Physical layer security in uav systems: Challenges and opportunities,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 40-47, 2019." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 209, + 563, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 209, + 563, + 245 + ], + "spans": [ + { + "bbox": [ + 314, + 209, + 563, + 245 + ], + "type": "text", + "content": "[47] G. Zhang, Q. Hu, Y. Zhang, Y. Dai, and T. 
Jiang, \"Lightweight cross-domain authentication scheme for securing wireless IoT devices using backscatter communication,\" IEEE Internet of Things Journal, vol. 11, no. 12, pp. 22021-22035, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 245, + 563, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 245, + 563, + 272 + ], + "spans": [ + { + "bbox": [ + 314, + 245, + 563, + 272 + ], + "type": "text", + "content": "[48] Q. Wu, W. Mei, and R. Zhang, \"Safeguarding wireless network with uavs: A physical layer security perspective,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 12-18, 2019." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 272, + 563, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 272, + 563, + 300 + ], + "spans": [ + { + "bbox": [ + 314, + 272, + 563, + 300 + ], + "type": "text", + "content": "[49] H.-M. Wang, X. Zhang, and J.-C. Jiang, “Uav-involved wireless physical-layer secure communications: Overview and research directions,” IEEE Wireless Communications, vol. 26, no. 5, pp. 32-39, 2019." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 299, + 563, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 299, + 563, + 326 + ], + "spans": [ + { + "bbox": [ + 314, + 299, + 563, + 326 + ], + "type": "text", + "content": "[50] B. Li, Z. Fei, Y. Zhang, and M. Guizani, \"Secure uav communication networks over 5g,\" IEEE Wireless Communications, vol. 26, no. 5, pp. 114-120, 2019." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 326, + 563, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 326, + 563, + 361 + ], + "spans": [ + { + "bbox": [ + 314, + 326, + 563, + 361 + ], + "type": "text", + "content": "[51] L. Bai, L. Zhu, J. Liu, J. Choi, and W. 
Zhang, \"Physical layer authentication in wireless communication networks: A survey,\" Journal of Communications and Information Networks, vol. 5, no. 3, pp. 237-264, 2020." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 361, + 563, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 361, + 563, + 389 + ], + "spans": [ + { + "bbox": [ + 314, + 361, + 563, + 389 + ], + "type": "text", + "content": "[52] N. Xie, Z. Li, and H. Tan, \"A survey of physical-layer authentication in wireless communications,\" IEEE Communications Surveys & Tutorials, vol. 23, no. 1, pp. 282-310, 2021." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 314, + 389, + 563, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 389, + 563, + 416 + ], + "spans": [ + { + "bbox": [ + 314, + 389, + 563, + 416 + ], + "type": "text", + "content": "[53] Y. Xu, T. Zhang, D. Yang, Y. Liu, and M. Tao, \"Joint resource and trajectory optimization for security in uav-assisted mec systems,\" IEEE Transactions on Communications, vol. 69, no. 1, pp. 573-588, 2021." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 314, + 416, + 563, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 416, + 563, + 452 + ], + "spans": [ + { + "bbox": [ + 314, + 416, + 563, + 452 + ], + "type": "text", + "content": "[54] Y. Zhang, Z. Kuang, Y. Feng, and F. Hou, \"Task offloading and trajectory optimization for secure communications in dynamic user multi-uav mec systems,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 14427-14440, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 314, + 452, + 563, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 452, + 563, + 487 + ], + "spans": [ + { + "bbox": [ + 314, + 452, + 563, + 487 + ], + "type": "text", + "content": "[55] Y. Zhang, X. Gao, H. Yuan, K. Yang, J. Kang, P. Wang, and D. 
Niyato, \"Joint uav trajectory and power allocation with hybrid fso/rf for secure space-air-ground communications,\" IEEE Internet of Things Journal, vol. 11, no. 19, pp. 31407-31421, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 314, + 487, + 563, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 487, + 563, + 523 + ], + "spans": [ + { + "bbox": [ + 314, + 487, + 563, + 523 + ], + "type": "text", + "content": "[56] W. Wang, X. Li, R. Wang, K. Cumanan, W. Feng, Z. Ding, and O. A. Dobre, \"Robust 3d-trajectory and time switching optimization for dual-uav-enabled secure communications,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 11, pp. 3334-3347, 2021." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 314, + 523, + 563, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 523, + 563, + 559 + ], + "spans": [ + { + "bbox": [ + 314, + 523, + 563, + 559 + ], + "type": "text", + "content": "[57] C. Wen, L. Qiu, and X. Liang, \"Securing uav communication with mobile uav eavesdroppers: Joint trajectory and communication design,\" in 2021 IEEE Wireless Communications and Networking Conference (WCNC), 2021, pp. 1-6." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 314, + 559, + 563, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 559, + 563, + 603 + ], + "spans": [ + { + "bbox": [ + 314, + 559, + 563, + 603 + ], + "type": "text", + "content": "[58] W. Lu, Y. Ding, Y. Gao, S. Hu, Y. Wu, N. Zhao, and Y. Gong, \"Resource and trajectory optimization for secure communications in dual unmanned aerial vehicle mobile edge computing systems,\" IEEE Transactions on Industrial Informatics, vol. 18, no. 4, pp. 2704-2713, 2022." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 314, + 603, + 563, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 603, + 563, + 640 + ], + "spans": [ + { + "bbox": [ + 314, + 603, + 563, + 640 + ], + "type": "text", + "content": "[59] F. Lu, G. Liu, W. Lu, Y. Gao, J. Cao, N. Zhao, and A. Nallanathan, \"Resource and trajectory optimization for uav-relay-assisted secure maritime mec,\" IEEE Transactions on Communications, vol. 72, no. 3, pp. 1641-1652, 2024." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 314, + 640, + 563, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 640, + 563, + 676 + ], + "spans": [ + { + "bbox": [ + 314, + 640, + 563, + 676 + ], + "type": "text", + "content": "[60] A. S. Abdalla, A. Behfarnia, and V. Marojevic, \"Uav trajectory and multi-user beamforming optimization for clustered users against passive eavesdropping attacks with unknown csi,\" IEEE Transactions on Vehicular Technology, vol. 72, no. 11, pp. 14426-14442, 2023." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 314, + 676, + 563, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 676, + 563, + 712 + ], + "spans": [ + { + "bbox": [ + 314, + 676, + 563, + 712 + ], + "type": "text", + "content": "[61] Y. Ding, H. Han, W. Lu, Y. Wang, N. Zhao, X. Wang, and X. Yang, \"Ddqn-based trajectory and resource optimization for uav-aided mec secure communications,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 4, pp. 6006-6011, 2024." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 314, + 712, + 563, + 748 + ], + "type": "text", + "content": "[62] H. Kang, X. Chang, J. Mišić, V. B. Mišić, J. Fan, and J. 
Bai, “Improving dual-uav aided ground-uav bi-directional communication security: Joint uav trajectory and transmit power optimization,” IEEE Transactions on Vehicular Technology, vol. 71, no. 10, pp. 10570–10583, 2022." + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "spans": [ + { + "bbox": [ + 50, + 57, + 301, + 93 + ], + "type": "text", + "content": "[63] Y. Zhang, Z. Mou, F. Gao, J. Jiang, R. Ding, and Z. Han, \"Uav-enabled secure communications by multi-agent deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 10, pp. 11599-11611, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 94, + 301, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 94, + 301, + 129 + ], + "spans": [ + { + "bbox": [ + 50, + 94, + 301, + 129 + ], + "type": "text", + "content": "[64] Y. Liu, C. Huang, G. Chen, R. Song, S. Song, and P. Xiao, “Deep learning empowered trajectory and passive beamforming design in uav-ris enabled secure cognitive non-terrestrial networks,” IEEE Wireless Communications Letters, vol. 13, no. 1, pp. 188–192, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 130, + 301, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 130, + 301, + 174 + ], + "spans": [ + { + "bbox": [ + 51, + 130, + 301, + 174 + ], + "type": "text", + "content": "[65] J. Wang, R. Wang, Z. Zheng, R. Lin, L. Wu, and F. Shu, \"Physical layer security enhancement in uav-assisted cooperative jamming for cognitive radio networks: A mappo-lstm deep reinforcement learning approach,\" IEEE Transactions on Vehicular Technology, pp. 1-14, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 175, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 175, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 51, + 175, + 301, + 203 + ], + "type": "text", + "content": "[66] X. Tang, N. Liu, R. Zhang, and Z. Han, \"Deep learning-assisted secure uav-relaying networks with channel uncertainties,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 5, pp. 5048-5059, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 204, + 301, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 204, + 301, + 231 + ], + "spans": [ + { + "bbox": [ + 51, + 204, + 301, + 231 + ], + "type": "text", + "content": "[67] X. Li, R. Yao, Y. Fan, P. Wang, and J. Xu, \"Secure efficiency map-enabled uav trajectory planning,\" IEEE Wireless Communications Letters, vol. 12, no. 8, pp. 1324-1328, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 232, + 301, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 232, + 301, + 268 + ], + "spans": [ + { + "bbox": [ + 51, + 232, + 301, + 268 + ], + "type": "text", + "content": "[68] R. Karmakar, G. Kaddoum, and O. Akhrif, “A novel federated learning-based smart power and 3d trajectory control for fairness optimization in secure uav-assisted mec services,” IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 
4832–4848, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 269, + 301, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 269, + 301, + 296 + ], + "spans": [ + { + "bbox": [ + 51, + 269, + 301, + 296 + ], + "type": "text", + "content": "[69] Z. Li, X. Liao, J. Shi, L. Li, and P. Xiao, “Md-gan-based uav trajectory and power optimization for cognitive covert communications,” IEEE Internet of Things Journal, vol. 9, no. 12, pp. 10187-10199, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 297, + 301, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 297, + 301, + 332 + ], + "spans": [ + { + "bbox": [ + 51, + 297, + 301, + 332 + ], + "type": "text", + "content": "[70] S. Jia, L. Xiaomeng, L. Xiaomin, T. Zhuangzhuang, and H. Junfan, \"Covert leo satellite communication aided by generative adversarial network based cooperative uav jamming,\" China Communications, vol. 21, no. 9, pp. 27-39, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 333, + 301, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 333, + 301, + 369 + ], + "spans": [ + { + "bbox": [ + 51, + 333, + 301, + 369 + ], + "type": "text", + "content": "[71] C. Zhang, G. Sun, J. Li, Q. Wu, J. Wang, D. Niyato, and Y. Liu, \"Multi-objective aerial collaborative secure communication optimization via generative diffusion model-enabled deep reinforcement learning,\" IEEE Transactions on Mobile Computing, pp. 1-18, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 370, + 301, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 370, + 301, + 406 + ], + "spans": [ + { + "bbox": [ + 51, + 370, + 301, + 406 + ], + "type": "text", + "content": "[72] T. Alladi, Naren, G. Bansal, V. Chamola, and M. 
Guizani, \"Secauthuav: A novel authentication scheme for uav-ground station and uav-uav communication,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 15068-15077, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "spans": [ + { + "bbox": [ + 51, + 407, + 301, + 443 + ], + "type": "text", + "content": "[73] R. Karmakar, G. Kaddoum, and O. Akhrif, \"A puf and fuzzy extractor-based uav-ground station and uav-uav authentication mechanism with intelligent adaptation of secure sessions,\" IEEE Transactions on Mobile Computing, vol. 23, no. 5, pp. 3858-3875, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 444, + 301, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 444, + 301, + 479 + ], + "spans": [ + { + "bbox": [ + 51, + 444, + 301, + 479 + ], + "type": "text", + "content": "[74] M. Tanveer, A. Aldosary, S.-u.-d. Khokhar, A. K. Das, S. A. Aldossari, and S. A. Chaudhry, “Paf-iod: Puf-enabled authentication framework for the internet of drones,” IEEE Transactions on Vehicular Technology, vol. 73, no. 7, pp. 9560–9574, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 480, + 301, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 480, + 301, + 517 + ], + "spans": [ + { + "bbox": [ + 51, + 480, + 301, + 517 + ], + "type": "text", + "content": "[75] S. J. Maeng, Y. Yapici, i. Guvenc, A. Bhuyan, and H. Dai, “Precoder design for physical-layer security and authentication in massive mimo uav communications,” IEEE Transactions on Vehicular Technology, vol. 71, no. 3, pp. 2949–2964, 2022." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 517, + 301, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 517, + 301, + 553 + ], + "spans": [ + { + "bbox": [ + 51, + 517, + 301, + 553 + ], + "type": "text", + "content": "[76] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, B. Vucetic, and P. Fan, \"A uav-aided physical layer authentication based on channel characteristics and geographical locations,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 1, pp. 1053–1064, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 51, + 554, + 301, + 582 + ], + "type": "text", + "content": "[77] Y. Zhou, Y. Wang, Z. Ma, P. Fan, and M. Xiao, \"Physical layer authentication for uav communications under rayleigh and rician channels,\" IEEE Transactions on Wireless Communications, pp. 1-1, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 301, + 609 + ], + "type": "text", + "content": "[78] Y.-S. Shiu, S. Y. Chang, H.-C. Wu, S. C.-H. Huang, and H.-H. Chen, \"Physical layer security in wireless networks: a tutorial,\" IEEE Wireless Communications, vol. 18, no. 2, pp. 66-74, 2011." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "spans": [ + { + "bbox": [ + 51, + 609, + 301, + 636 + ], + "type": "text", + "content": "[79] J. Xu, D. Li, Z. Zhu, Z. Yang, N. Zhao, and D. Niyato, “Anti-jamming design for integrated sensing and communication via aerial iris,” IEEE Transactions on Communications, vol. 72, no. 8, pp. 4607–4619, 2024." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 51, + 638, + 301, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 638, + 301, + 673 + ], + "spans": [ + { + "bbox": [ + 51, + 638, + 301, + 673 + ], + "type": "text", + "content": "[80] B. Duo, Q. Wu, X. Yuan, and R. Zhang, “Anti-jamming 3d trajectory design for uav-enabled wireless sensor networks under probabilistic loss channel,” IEEE Transactions on Vehicular Technology, vol. 69, no. 12, pp. 16288-16293, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 51, + 674, + 301, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 674, + 301, + 710 + ], + "spans": [ + { + "bbox": [ + 51, + 674, + 301, + 710 + ], + "type": "text", + "content": "[81] Y. Wu, W. Yang, X. Guan, and Q. Wu, \"Energy-efficient trajectory design for uav-enabled communication under malicious jamming,\" IEEE Wireless Communications Letters, vol. 10, no. 2, pp. 206-210, 2021." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 711, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 711, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 51, + 711, + 301, + 747 + ], + "type": "text", + "content": "[82] Y. Wu, W. Yang, X. Guan, and Q. Wu, \"Uav-enabled relay communication under malicious jamming: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 70, no. 8, pp. 8275-8279, 2021." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 56, + 563, + 747 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "spans": [ + { + "bbox": [ + 315, + 56, + 563, + 83 + ], + "type": "text", + "content": "[83] M. A. Aref, S. K. Jayaweera, and S. 
Machuzak, \"Multi-agent reinforcement learning based cognitive anti-jamming,\" in 2017 IEEE Wireless Communications and Networking Conference (WCNC), 2017, pp. 1-6." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "spans": [ + { + "bbox": [ + 315, + 84, + 563, + 120 + ], + "type": "text", + "content": "[84] L. Jia, F. Yao, Y. Sun, Y. Xu, S. Feng, and A. Anpalagan, “A hierarchical learning solution for anti-jamming stackelberg game with discrete power strategies,” IEEE Wireless Communications Letters, vol. 6, no. 6, pp. 818–821, 2017." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 121, + 563, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 121, + 563, + 156 + ], + "spans": [ + { + "bbox": [ + 315, + 121, + 563, + 156 + ], + "type": "text", + "content": "[85] X. Liu, Y. Xu, L. Jia, Q. Wu, and A. Anpalagan, “Anti-jamming communications using spectrum waterfall: A deep reinforcement learning approach,” IEEE Communications Letters, vol. 22, no. 5, pp. 998–1001, 2018." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 156, + 563, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 156, + 563, + 201 + ], + "spans": [ + { + "bbox": [ + 315, + 156, + 563, + 201 + ], + "type": "text", + "content": "[86] H. Yang, Z. Xiong, J. Zhao, D. Niyato, Q. Wu, H. V. Poor, and M. Tornatore, \"Intelligent reflecting surface assisted anti-jamming communications: A fast reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 20, no. 3, pp. 1963-1974, 2021." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 201, + 563, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 201, + 563, + 237 + ], + "spans": [ + { + "bbox": [ + 315, + 201, + 563, + 237 + ], + "type": "text", + "content": "[87] Z. 
Yin, Y. Lin, Y. Zhang, Y. Qian, F. Shu, and J. Li, \"Collaborative multiagent reinforcement learning aided resource allocation for uav anti-jamming communication,\" IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23995-24008, 2022." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 238, + 563, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 238, + 563, + 274 + ], + "spans": [ + { + "bbox": [ + 315, + 238, + 563, + 274 + ], + "type": "text", + "content": "[88] Y. Ma, K. Liu, Y. Liu, X. Wang, and Z. Zhao, \"An intelligent game-based anti-jamming solution using adversarial populations for aerial communication networks,\" IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 275, + 563, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 275, + 563, + 311 + ], + "spans": [ + { + "bbox": [ + 315, + 275, + 563, + 311 + ], + "type": "text", + "content": "[89] Z. Shao, H. Yang, L. Xiao, W. Su, Y. Chen, and Z. Xiong, \"Deep reinforcement learning-based resource management for uav-assisted mobile edge computing against jamming,\" IEEE Transactions on Mobile Computing, vol. 23, no. 12, pp. 13358-13374, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 315, + 312, + 563, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 312, + 563, + 346 + ], + "spans": [ + { + "bbox": [ + 315, + 312, + 563, + 346 + ], + "type": "text", + "content": "[90] Y. Zhou, P. L. Yeoh, K. J. Kim, Z. Ma, Y. Li, and B. Vucetic, \"Game theoretic physical layer authentication for spoofing detection in uav communications,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 6, pp. 6750-6755, 2022." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 347, + 563, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 347, + 563, + 384 + ], + "spans": [ + { + "bbox": [ + 315, + 347, + 563, + 384 + ], + "type": "text", + "content": "[91] Q. Cheng, Y. Zhou, H. Liu, L. Yang, Z. Ma, and P. Fan, \"Physical layer authentication in uav communications with channel randomness and jamming uncertainty,\" IEEE Transactions on Vehicular Technology, pp. 1-6, 2025." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 384, + 563, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 384, + 563, + 411 + ], + "spans": [ + { + "bbox": [ + 315, + 384, + 563, + 411 + ], + "type": "text", + "content": "[92] A. Eldosouky, A. Ferdowsi, and W. Saad, “Drones in distress: A game-theoretic countermeasure for protecting uavs against gps spoofing,” IEEE Internet of Things Journal, vol. 7, no. 4, pp. 2840–2854, 2020." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 411, + 563, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 411, + 563, + 438 + ], + "spans": [ + { + "bbox": [ + 315, + 411, + 563, + 438 + ], + "type": "text", + "content": "[93] D. She, W. Wang, Z. Yin, J. Wang, and H. Shan, \"Gps spoofing attack recognition for uavs with limited samples,\" IEEE Internet of Things Journal, vol. 12, no. 1, pp. 250-261, 2025." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 439, + 563, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 439, + 563, + 474 + ], + "spans": [ + { + "bbox": [ + 315, + 439, + 563, + 474 + ], + "type": "text", + "content": "[94] Y. Dang, C. Benzaid, B. Yang, T. Taleb, and Y. Shen, \"Deep-ensemble-learning-based gps spoofing detection for cellular-connected uavs,\" IEEE Internet of Things Journal, vol. 9, no. 24, pp. 25068-25085, 2022." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 475, + 563, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 475, + 563, + 510 + ], + "spans": [ + { + "bbox": [ + 315, + 475, + 563, + 510 + ], + "type": "text", + "content": "[95] X. Wang, J. Wang, Y. Xu, J. Chen, L. Jia, X. Liu, and Y. Yang, \"Dynamic spectrum anti-jamming communications: Challenges and opportunities,\" IEEE Communications Magazine, vol. 58, no. 2, pp. 79-85, 2020." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 511, + 563, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 511, + 563, + 546 + ], + "spans": [ + { + "bbox": [ + 315, + 511, + 563, + 546 + ], + "type": "text", + "content": "[96] L. Zhang, G. Ding, Q. Wu, and Z. Han, \"Spectrum sensing under spectrum misuse behaviors: A multi-hypothesis test perspective,\" IEEE Transactions on Information Forensics and Security, vol. 13, no. 4, pp. 993-1007, 2018." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 547, + 563, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 547, + 563, + 574 + ], + "spans": [ + { + "bbox": [ + 315, + 547, + 563, + 574 + ], + "type": "text", + "content": "[97] S. C. Hassler, U. A. Mughal, and M. Ismail, “Cyber-physical intrusion detection system for unmanned aerial vehicles,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 6, pp. 6106–6117, 2024." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 575, + 563, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 575, + 563, + 611 + ], + "spans": [ + { + "bbox": [ + 315, + 575, + 563, + 611 + ], + "type": "text", + "content": "[98] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"An emergent self-awareness module for physical layer security in cognitive uav radios,\" IEEE Transactions on Cognitive Communications and Networking, vol. 8, no. 
2, pp. 888-906, 2022." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 315, + 612, + 563, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 612, + 563, + 647 + ], + "spans": [ + { + "bbox": [ + 315, + 612, + 563, + 647 + ], + "type": "text", + "content": "[99] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, \"Automatic jamming signal classification in cognitive uav radios,\" IEEE Transactions on Vehicular Technology, vol. 71, no. 12, pp. 12972-12988, 2022." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 312, + 647, + 563, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 647, + 563, + 684 + ], + "spans": [ + { + "bbox": [ + 312, + 647, + 563, + 684 + ], + "type": "text", + "content": "[100] A. Krayani, A. S. Alam, L. Marcenaro, A. Nallanathan, and C. Regazzoni, “A novel resource allocation for anti-jamming in cognitive-uavs: An active inference approach,” IEEE Communications Letters, vol. 26, no. 10, pp. 2272–2276, 2022." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 312, + 684, + 563, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 684, + 563, + 720 + ], + "spans": [ + { + "bbox": [ + 312, + 684, + 563, + 720 + ], + "type": "text", + "content": "[101] D. Darsena, G. Gelli, I. Iudice, and F. Verde, “Detection and blind channel estimation for uav-aided wireless sensor networks in smart cities under mobile jamming attack,” IEEE Internet of Things Journal, vol. 9, no. 14, pp. 11932–11950, 2022." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 312, + 720, + 563, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 720, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 312, + 720, + 563, + 747 + ], + "type": "text", + "content": "[102] L. Zhang, G. Ding, Q. Wu, and P. Liu, “Detection of abnormal power emission in uav communication networks,” IEEE Wireless Communications Letters, vol. 
8, no. 4, pp. 1179–1182, 2019." + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 47, + 56, + 301, + 93 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 56, + 301, + 93 + ], + "spans": [ + { + "bbox": [ + 47, + 56, + 301, + 93 + ], + "type": "text", + "content": "[103] T. M. Hoang, N. M. Nguyen, and T. Q. Duong, “Detection of eavesdropping attack in uav-aided wireless systems: Unsupervised learning with one-class svm and k-means clustering,” IEEE Wireless Communications Letters, vol. 9, no. 2, pp. 139–142, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 93, + 301, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 93, + 301, + 129 + ], + "spans": [ + { + "bbox": [ + 47, + 93, + 301, + 129 + ], + "type": "text", + "content": "[104] Y. An, R. Kang, Y. Ban, and S. Yang, “Beidou receiver based on anti-jamming antenna arrays with self-calibration for precise relative positioning,” Journal of Systems Engineering and Electronics, vol. 35, no. 5, pp. 1132–1147, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 129, + 301, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 129, + 301, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 129, + 301, + 182 + ], + "type": "text", + "content": "[105] H. Sathaye and A. 
Ranganathan, “Semperfi: a psychoer eliminating standalone gps receiver,” in Proceedings of the 13th ACM Conference on Security and Privacy in Wireless and Mobile Networks, ser. WiSec '20. New York, NY, USA: Association for Computing Machinery, 2020, p. 353–355. [Online]. Available: https://doi.org/10.1145/3395351.3401703" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "text", + "content": "[106] H. Sathaye, G. LaMountain, P. Closas, and A. Ranganathan, “Semperfi: Anti-spoofing gps receiver for uavs,” in Network and Distributed Systems Security (NDSS) Symposium 2022, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 209, + 301, + 236 + ], + "type": "text", + "content": "[107] S. Han, L. Chen, W. Meng, and C. Li, \"Improve the security of gnsss receivers through spoofing mitigation,\" IEEE Access, vol. 5, pp. 21057-21069, 2017." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 236, + 301, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 263 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 263 + ], + "type": "text", + "content": "[108] X. Ye, Y. Mao, X. Yu, S. Sun, L. Fu, and J. Xu, \"Integrated sensing and communications for low-altitude economy: A deep reinforcement learning approach,\" arXiv preprint arXiv:2412.04074, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 263, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 263, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 263, + 301, + 308 + ], + "type": "text", + "content": "[109] C. Huang, S. Fang, H. Wu, Y. 
Wang, and Y. Yang, \"Low-altitude intelligent transportation: System architecture, infrastructure, and key technologies,\" Journal of Industrial Information Integration, vol. 42, p. 100694, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2452414X24001377" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 343 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 343 + ], + "type": "text", + "content": "[110] Y. Yang, Y. Chen, J. Wang, G. Sun, and D. Niyato, \"Embodied aiempowered low altitude economy: Integrated sensing, communications, computation, and control (isc3),\" arXiv preprint arXiv:2412.19996, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 344, + 301, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 344, + 301, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 344, + 301, + 380 + ], + "type": "text", + "content": "[111] J. Li, G. Sun, Q. Wu, S. Liang, J. Wang, D. Niyato, and D. I. Kim, \"Aerial secure collaborative communications under eavesdropper collusion in low-altitude economy: A generative swarm intelligent approach,\" arXiv preprint arXiv:2503.00721, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "text", + "content": "[112] G. Sun, W. Xie, D. Niyato, H. Du, J. Kang, J. Wu, S. Sun, and P. Zhang, \"Generative ai for advanced uav networking,\" IEEE Network, pp. 1-1, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 301, + 442 + ], + "type": "text", + "content": "[113] X. 
Tang, X. Li, R. Yu, Y. Wu, J. Ye, F. Tang, and Q. Chen, \"Digital-twin-assisted task assignment in multi-uav systems: A deep reinforcement learning approach,\" IEEE Internet of Things Journal, vol. 10, no. 17, pp. 15362-15375, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 442, + 301, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 442, + 301, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 442, + 301, + 479 + ], + "type": "text", + "content": "[114] X. Tang, Q. Chen, R. Yu, and X. Li, \"Digital twin-empowered task assignment in aerial mec network: A resource coalition cooperation approach with generative model,\" IEEE Transactions on Network Science and Engineering, vol. 12, no. 1, pp. 13-27, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 47, + 479, + 301, + 514 + ], + "type": "text", + "content": "[115] Y. Jiang, X. Li, G. Zhu, H. Li, J. Deng, and Q. Shi, \"6g non-terrestrial networks enabled low-altitude economy: Opportunities and challenges,\" ArXiv, vol. abs/2311.09047, 2023. [Online]. Available: https://api_semanticscholar.org/CorpusID:265213350" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 514, + 301, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 514, + 301, + 541 + ], + "spans": [ + { + "bbox": [ + 47, + 514, + 301, + 541 + ], + "type": "text", + "content": "[116] X. Luo, Y. Zhang, Z. He, G. Yang, and Z. Ji, \"A two-step environment-learning-based method for optimal uav deployment,\" IEEE Access, vol. 7, pp. 149328-149340, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 541, + 301, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 541, + 301, + 577 + ], + "spans": [ + { + "bbox": [ + 47, + 541, + 301, + 577 + ], + "type": "text", + "content": "[117] X. Tang, Q. Chen, W. Weng, B. Liao, J. Wang, X. Cao, and X. Li, \"Dnn task assignment in uav networks: A generative ai enhanced multi-agent reinforcement learning approach,\" IEEE Internet of Things Journal, pp. 1-1, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 577, + 301, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 577, + 301, + 621 + ], + "spans": [ + { + "bbox": [ + 47, + 577, + 301, + 621 + ], + "type": "text", + "content": "[118] H. Yang, J. Zhao, Z. Xiong, K.-Y. Lam, S. Sun, and L. Xiao, \"Privacy-preserving federated learning for uav-enabled networks: Learning-based joint scheduling and resource management,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 3144-3159, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 622, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 622, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 622, + 301, + 666 + ], + "type": "text", + "content": "[119] X. Cai, T. Izydorczyk, J. Rodríguez-Pineiro, I. Z. Kovács, J. Wigard, F. M. L. Tavares, and P. E. Mogensen, \"Empirical low-altitude air-to-ground spatial channel characterization for cellular networks connectivity,\" IEEE Journal on Selected Areas in Communications, vol. 39, no. 10, pp. 2975-2991, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 667, + 301, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 667, + 301, + 702 + ], + "spans": [ + { + "bbox": [ + 47, + 667, + 301, + 702 + ], + "type": "text", + "content": "[120] Y. Zhao, F. Zhou, L. Feng, W. Li, Y. Sun, and M. A. 
Imran, \"Backhaul-constrained coverage analysis of integrated high and low altitude platforms aerial communication system in post-disaster areas,\" IEEE Communications Letters, vol. 27, no. 6, pp. 1629-1633, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 703, + 301, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 703, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 47, + 703, + 301, + 748 + ], + "type": "text", + "content": "[121] S. H. Alsamhi, F. A. Almalki, F. Afghah, A. Hawbani, A. V. Shvetsov, B. Lee, and H. Song, \"Drones' edge intelligence over smart environments in b5g: Blockchain and federated learning synergy,\" IEEE Transactions on Green Communications and Networking, vol. 6, no. 1, pp. 295-312, 2022." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 563, + 748 + ], + "type": "list", + "angle": 0, + "index": 41, + "blocks": [ + { + "bbox": [ + 310, + 56, + 563, + 93 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 563, + 93 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 563, + 93 + ], + "type": "text", + "content": "[122] A. Ahmad, A. A. Cheema, and D. Finlay, \"A survey of radio propagation channel modelling for low altitude flying base stations,\" Computer Networks, vol. 171, p. 107122, 2020. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128619310692" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 563, + 120 + ], + "type": "text", + "content": "[123] I. Bozcan and E. Kayacan, \"Context-dependent anomaly detection for low altitude traffic surveillance,\" in 2021 IEEE International Conference on Robotics and Automation (ICRA), 2021, pp. 224-230." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 120, + 563, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 120, + 563, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 120, + 563, + 156 + ], + "type": "text", + "content": "[124] Y. Liu, X. Gong, and Y. Yang, \"A multilayer fusion network with rotation-invariant and dynamic feature representation for multiview low-altitude image registration,\" IEEE Geoscience and Remote Sensing Letters, vol. 18, no. 6, pp. 1019-1023, 2021." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 156, + 563, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 156, + 563, + 183 + ], + "spans": [ + { + "bbox": [ + 310, + 156, + 563, + 183 + ], + "type": "text", + "content": "[125] A. Omri and M. O. Hasna, \"Physical layer security analysis of uav based communication networks,\" in 2018 IEEE 88th Vehicular Technology Conference (VTC-Fall), 2018, pp. 1-6." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 183, + 563, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 183, + 563, + 209 + ], + "spans": [ + { + "bbox": [ + 310, + 183, + 563, + 209 + ], + "type": "text", + "content": "[126] S. Samonas and D. Coss, “The cia strikes back: Redefining confidentiality, integrity and availability in security.” Journal of Information System Security, vol. 10, no. 3, 2014." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "spans": [ + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "type": "text", + "content": "[127] C. Zhao, H. Du, D. Niyato, J. Kang, Z. Xiong, D. I. Kim, X. Shen, and K. B. Letaief, \"Generative ai for secure physical layer communications: A survey,\" IEEE Transactions on Cognitive Communications and Networking, vol. 11, no. 1, pp. 3-26, 2025." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 245, + 563, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 245, + 563, + 281 + ], + "spans": [ + { + "bbox": [ + 310, + 245, + 563, + 281 + ], + "type": "text", + "content": "[128] J. M. Hamamreh, H. M. Furqan, and H. Arslan, \"Classifications and applications of physical layer security techniques for confidentiality: A comprehensive survey,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1773-1828, 2019." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "type": "text", + "content": "[129] M. Shakiba-Herfeh, A. Chorti, and H. Vincent Poor, “Physical layer security: Authentication, integrity, and confidentiality,” Physical layer security, pp. 129–150, 2021." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 308, + 563, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 563, + 343 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 563, + 343 + ], + "type": "text", + "content": "[130] S. Hu, Q. Wu, and X. Wang, \"Energy management and trajectory optimization for uav-enabled legitimate monitoring systems,\" IEEE Transactions on Wireless Communications, vol. 20, no. 1, pp. 142-155, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 344, + 563, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 344, + 563, + 371 + ], + "spans": [ + { + "bbox": [ + 310, + 344, + 563, + 371 + ], + "type": "text", + "content": "[131] D. Wang, B. Bai, W. Zhao, and Z. Han, “A survey of optimization approaches for wireless physical layer security,” IEEE Communications Surveys & Tutorials, vol. 21, no. 2, pp. 1878–1911, 2019." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 371, + 563, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 371, + 563, + 407 + ], + "spans": [ + { + "bbox": [ + 310, + 371, + 563, + 407 + ], + "type": "text", + "content": "[132] M. A. Arfaoui, M. D. Soltani, I. Tavakkolnia, A. Ghrayeb, M. Safari, C. M. Assi, and H. Haas, \"Physical layer security for visible light communication systems: A survey,\" IEEE Communications Surveys & Tutorials, vol. 22, no. 3, pp. 1887-1908, 2020." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 407, + 563, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 407, + 563, + 442 + ], + "spans": [ + { + "bbox": [ + 310, + 407, + 563, + 442 + ], + "type": "text", + "content": "[133] Z. Yin, M. Jia, N. Cheng, W. Wang, F. Lyu, Q. Guo, and X. Shen, \"Uav-assisted physical layer security in multi-beam satellite-enabled vehicle communications,\" IEEE Transactions on Intelligent Transportation Systems, vol. 23, no. 3, pp. 2739-2751, 2022." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 442, + 563, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 442, + 563, + 479 + ], + "spans": [ + { + "bbox": [ + 310, + 442, + 563, + 479 + ], + "type": "text", + "content": "[134] X. Fang, N. Zhang, S. Zhang, D. Chen, X. Sha, and X. Shen, \"On physical layer security: Weighted fractional fourier transform based user cooperation,\" IEEE Transactions on Wireless Communications, vol. 16, no. 8, pp. 5498-5510, 2017." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 479, + 563, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 479, + 563, + 514 + ], + "spans": [ + { + "bbox": [ + 310, + 479, + 563, + 514 + ], + "type": "text", + "content": "[135] W. Tian, X. Ding, G. Liu, Y. Dai, and Z. 
Han, “A uav-assisted secure communication system by jointly optimizing transmit power and trajectory in the internet of things,” IEEE Transactions on Green Communications and Networking, vol. 7, no. 4, pp. 2025–2037, 2023." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 514, + 563, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 514, + 563, + 559 + ], + "spans": [ + { + "bbox": [ + 310, + 514, + 563, + 559 + ], + "type": "text", + "content": "[136] F. Irram, M. Ali, M. Naeem, and S. Mumtaz, \"Physical layer security for beyond 5g/6g networks: Emerging technologies and future directions,\" Journal of Network and Computer Applications, vol. 206, p. 103431, 2022. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S108480452200087X" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 559, + 563, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 563, + 604 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 563, + 604 + ], + "type": "text", + "content": "[137] W. Lu, P. Si, F. Lu, B. Li, Z. Liu, S. Hu, and Y. Gong, \"Resource and trajectory optimization in uav-powered wireless communication system,\" Science China Information Sciences, vol. 64, no. 4, p. 140304, Mar 2021, accessed: 2025-01-03. [Online]. Available: https://doi.org/10.1007/s11432-020-3060-4" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 604, + 563, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 604, + 563, + 639 + ], + "spans": [ + { + "bbox": [ + 310, + 604, + 563, + 639 + ], + "type": "text", + "content": "[138] J. Luo, Z. Wang, M. Xia, L. Wu, Y. Tian, and Y. Chen, \"Path planning for uav communication networks: Related technologies, solutions, and opportunities,\" ACM Comput. Surv., vol. 55, no. 9, Jan. 2023. [Online]. 
Available: https://doi-org.remotexs.ntu.edu.sg/10.1145/3560261" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 639, + 563, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 639, + 563, + 676 + ], + "spans": [ + { + "bbox": [ + 310, + 639, + 563, + 676 + ], + "type": "text", + "content": "[139] A. V. Savkin, H. Huang, and W. Ni, “Securing uav communication in the presence of stationary or mobile eavesdroppers via online 3d trajectory planning,” IEEE Wireless Communications Letters, vol. 9, no. 8, pp. 1211–1215, 2020." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 676, + 563, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 676, + 563, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 676, + 563, + 712 + ], + "type": "text", + "content": "[140] X. Zhou, Q. Wu, S. Yan, F. Shu, and J. Li, \"Uav-enabled secure communications: Joint trajectory and transmit power optimization,\" IEEE Transactions on Vehicular Technology, vol. 68, no. 4, pp. 4069-4073, 2019." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 563, + 748 + ], + "type": "text", + "content": "[141] R. Ding, F. Gao, and X. S. Shen, \"3d uav trajectory design and frequency band allocation for energy-efficient and fair communication: A deep reinforcement learning approach,\" IEEE Transactions on Wireless Communications, vol. 19, no. 12, pp. 7796-7809, 2020." 
+ } + ] + } + ], + "index": 40 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 24, + 563, + 32 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "text", + "content": "[142] C. Zhong, J. Yao, and J. Xu, \"Secure uav communication with cooperative jamming and trajectory control,\" IEEE Communications Letters, vol. 23, no. 2, pp. 286-289, 2019." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 84, + 301, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 84, + 301, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 84, + 301, + 120 + ], + "type": "text", + "content": "[143] Y. Bai, H. Zhao, X. Zhang, Z. Chang, R. Jantti, and K. Yang, \"Toward autonomous multi-uav wireless network: A survey of reinforcement learning-based approaches,\" IEEE Communications Surveys & Tutorials, vol. 25, no. 4, pp. 3038-3067, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 121, + 301, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 121, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 121, + 301, + 156 + ], + "type": "text", + "content": "[144] R. Dong, B. Wang, K. Cao, J. Tian, and T. Cheng, \"Secure transmission design of ris enabled uav communication networks exploiting deep reinforcement learning,\" IEEE Transactions on Vehicular Technology, vol. 73, no. 6, pp. 
8404-8419, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "type": "text", + "content": "[145] X. Tang, T. Jiang, J. Liu, B. Li, D. Zhai, F. R. Yu, and Z. Han, \"Secure communication with uav-enabled aerial ris: Learning trajectory with reflection optimization,\" IEEE Transactions on Intelligent Vehicles, pp. 1-10, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 194, + 301, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 194, + 301, + 229 + ], + "spans": [ + { + "bbox": [ + 47, + 194, + 301, + 229 + ], + "type": "text", + "content": "[146] J. Duan, Y. Guan, S. E. Li, Y. Ren, Q. Sun, and B. Cheng, \"Distribu-tional soft actor-critic: Off-policy reinforcement learning for addressing value estimation errors,\" IEEE Transactions on Neural Networks and Learning Systems, vol. 33, no. 11, pp. 6584-6598, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 230, + 301, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 230, + 301, + 265 + ], + "spans": [ + { + "bbox": [ + 47, + 230, + 301, + 265 + ], + "type": "text", + "content": "[147] W. Chen, X. Qiu, T. Cai, H.-N. Dai, Z. Zheng, and Y. Zhang, “Deep reinforcement learning for internet of things: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 23, no. 3, pp. 1659–1692, 2021." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 266, + 301, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 266, + 301, + 302 + ], + "spans": [ + { + "bbox": [ + 47, + 266, + 301, + 302 + ], + "type": "text", + "content": "[148] F. Tang, H. Hofner, N. Kato, K. Kaneko, Y. Yamashita, and M. 
Hangai, “A deep reinforcement learning-based dynamic traffic offloading in space-air-ground integrated networks (sagin),” IEEE Journal on Selected Areas in Communications, vol. 40, no. 1, pp. 276–289, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 303, + 301, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 303, + 301, + 338 + ], + "spans": [ + { + "bbox": [ + 47, + 303, + 301, + 338 + ], + "type": "text", + "content": "[149] N. Yang, S. Chen, H. Zhang, and R. Berry, “Beyond the edge: An advanced exploration of reinforcement learning for mobile edge computing, its applications, and future research trajectories,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 339, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 339, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 339, + 301, + 365 + ], + "type": "text", + "content": "[150] Q. Mao, F. Hu, and Q. Hao, “Deep learning for intelligent wireless networks: A comprehensive survey,” IEEE Communications Surveys & Tutorials, vol. 20, no. 4, pp. 2595–2621, 2018." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 366, + 301, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 366, + 301, + 402 + ], + "spans": [ + { + "bbox": [ + 47, + 366, + 301, + 402 + ], + "type": "text", + "content": "[151] P. Consul, I. Budhiraja, and D. Garg, \"A hybrid secure resource allocation and trajectory optimization approach for mobile edge computing using federated learning based on web 3.0,\" IEEE Transactions on Consumer Electronics, vol. 70, no. 1, pp. 1167-1179, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 402, + 301, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 402, + 301, + 430 + ], + "spans": [ + { + "bbox": [ + 47, + 402, + 301, + 430 + ], + "type": "text", + "content": "[152] X. Hou, J. Wang, Z. Zhang, J. Wang, L. Liu, and Y. Ren, \"Split federated learning for uav-enabled integrated sensing, computation, and communication,\" arXiv preprint arXiv:2504.01443, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 430, + 301, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 430, + 301, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 430, + 301, + 464 + ], + "type": "text", + "content": "[153] K. Heo, W. Lee, and K. Lee, “Uav-assisted wireless-powered secure communications: Integration of optimization and deep learning,” IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 10530–10545, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 466, + 301, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 466, + 301, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 466, + 301, + 493 + ], + "type": "text", + "content": "[154] U. A. Mughal, Y. Alkhrijah, A. Almadhor, and C. Yuen, “Deep learning for secure uav-assisted ris communication networks,” IEEE Internet of Things Magazine, vol. 7, no. 2, pp. 38-44, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 494, + 301, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 494, + 301, + 520 + ], + "spans": [ + { + "bbox": [ + 47, + 494, + 301, + 520 + ], + "type": "text", + "content": "[155] R. Dong, B. Wang, and K. Cao, \"Deep learning driven 3d robust beamforming for secure communication of uav systems,\" IEEE Wireless Communications Letters, vol. 10, no. 8, pp. 1643-1647, 2021." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 521, + 301, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 521, + 301, + 555 + ], + "spans": [ + { + "bbox": [ + 47, + 521, + 301, + 555 + ], + "type": "text", + "content": "[156] M. Chen, U. Challita, W. Saad, C. Yin, and M. Debbah, \"Artificial neural networks-based machine learning for wireless networks: A tutorial,\" IEEE Communications Surveys & Tutorials, vol. 21, no. 4, pp. 3039-3071, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 557, + 301, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 557, + 301, + 592 + ], + "spans": [ + { + "bbox": [ + 47, + 557, + 301, + 592 + ], + "type": "text", + "content": "[157] M. T. Nguyen and L. B. Le, “Multi-uav trajectory control, resource allocation, and nomai user pairing for uplink energy minimization,” IEEE Internet of Things Journal, vol. 9, no. 23, pp. 23 728–23 740, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "spans": [ + { + "bbox": [ + 47, + 593, + 301, + 628 + ], + "type": "text", + "content": "[158] X. Liao, J. Shi, Z. Li, L. Zhang, and B. Xia, “A model-driven deep reinforcement learning heuristic algorithm for resource allocation in ultra-dense cellular networks,” IEEE Transactions on Vehicular Technology, vol. 69, no. 1, pp. 983–997, 2020." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 629, + 301, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 629, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 47, + 629, + 301, + 665 + ], + "type": "text", + "content": "[159] X. Liao, J. Si, J. Shi, Z. Li, and H. Ding, \"Generative adversarial network assisted power allocation for cooperative cognitive covert communication system,\" IEEE Communications Letters, vol. 24, no. 7, pp. 
1463-1467, 2020." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 666, + 301, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 701 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 701 + ], + "type": "text", + "content": "[160] Y. Zhou, P. L. Yeoh, H. Chen, Y. Li, R. Schober, L. Zhuo, and B. Vucetic, \"Improving physical layer security via a uav friendly jammer for unknown eavesdropper location,\" IEEE Transactions on Vehicular Technology, vol. 67, no. 11, pp. 11280-11284, 2018." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 702, + 301, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 702, + 301, + 729 + ], + "spans": [ + { + "bbox": [ + 47, + 702, + 301, + 729 + ], + "type": "text", + "content": "[161] H. Cao, C. Tan, Z. Gao, Y. Xu, G. Chen, P.-A. Heng, and S. Z. Li, “A survey on generative diffusion models,” IEEE Transactions on Knowledge and Data Engineering, vol. 36, no. 7, pp. 2814–2830, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 730, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 730, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 730, + 301, + 747 + ], + "type": "text", + "content": "[162] D. Chen, N. Zhang, N. Cheng, K. Zhang, Z. Qin, and X. Shen, \"Physical layer based message authentication with secure channel" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 57, + 563, + 747 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 331, + 57, + 563, + 75 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 57, + 563, + 75 + ], + "spans": [ + { + "bbox": [ + 331, + 57, + 563, + 75 + ], + "type": "text", + "content": "codes,\" IEEE Transactions on Dependable and Secure Computing, vol. 17, no. 5, pp. 1079-1093, 2020." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 76, + 563, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 76, + 563, + 102 + ], + "spans": [ + { + "bbox": [ + 310, + 76, + 563, + 102 + ], + "type": "text", + "content": "[163] G. Bansal and B. Sikdar, “S-maps: Scalable mutual authentication protocol for dynamic uav swarms,” IEEE Transactions on Vehicular Technology, vol. 70, no. 11, pp. 12088-12100, 2021." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 102, + 563, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 102, + 563, + 137 + ], + "spans": [ + { + "bbox": [ + 310, + 102, + 563, + 137 + ], + "type": "text", + "content": "[164] B. Chatterjee, D. Das, S. Maity, and S. Sen, \"Rf-puf: Enhancing iot security through authentication of wireless nodes using in-situ machine learning,\" IEEE Internet of Things Journal, vol. 6, no. 1, pp. 388-398, 2019." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 138, + 563, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 138, + 563, + 173 + ], + "spans": [ + { + "bbox": [ + 310, + 138, + 563, + 173 + ], + "type": "text", + "content": "[165] G. Bansal, N. Naren, V. Chamola, B. Sikdar, N. Kumar, and M. Guizani, \"Lightweight mutual authentication protocol for v2g using physical unclonable function,\" IEEE Transactions on Vehicular Technology, vol. 69, no. 7, pp. 7234-7246, 2020." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 174, + 563, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 174, + 563, + 209 + ], + "spans": [ + { + "bbox": [ + 310, + 174, + 563, + 209 + ], + "type": "text", + "content": "[166] C. Pu, A. Wall, K.-K. R. Choo, I. Ahmed, and S. Lim, \"A lightweight and privacy-preserving mutual authentication and key agreement protocol for internet of drones environment,\" IEEE Internet of Things Journal, vol. 9, no. 12, pp. 
9918-9933, 2022." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 209, + 563, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 209, + 563, + 253 + ], + "spans": [ + { + "bbox": [ + 310, + 209, + 563, + 253 + ], + "type": "text", + "content": "[167] Z. Zhang, C. Hsu, M. H. Au, L. Harn, J. Cui, Z. Xia, and Z. Zhao, \"Prlap-iod: A puf-based robust and lightweight authentication protocol for internet of drones,\" Computer Networks, vol. 238, p. 110118, 2024. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128623005637" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 254, + 563, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 254, + 563, + 281 + ], + "spans": [ + { + "bbox": [ + 310, + 254, + 563, + 281 + ], + "type": "text", + "content": "[168] J. Liu and X. Wang, \"Physical layer authentication enhancement using two-dimensional channel quantization,\" IEEE Transactions on Wireless Communications, vol. 15, no. 6, pp. 4171-4182, 2016." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 281, + 563, + 308 + ], + "type": "text", + "content": "[169] X. Lu, J. Lei, Y. Shi, and W. Li, \"Improved physical layer authentication scheme based on wireless channel phase,\" IEEE Wireless Communications Letters, vol. 11, no. 1, pp. 198-202, 2022." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 308, + 563, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 563, + 335 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 563, + 335 + ], + "type": "text", + "content": "[170] N. Xie, J. Chen, and L. Huang, “Physical-layer authentication using multiple channel-based features,” IEEE Transactions on Information Forensics and Security, vol. 16, pp. 
2356-2366, 2021." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 335, + 563, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 335, + 563, + 371 + ], + "spans": [ + { + "bbox": [ + 310, + 335, + 563, + 371 + ], + "type": "text", + "content": "[171] Y. Zhou, Z. Ma, H. Liu, P. L. Yeoh, Y. Li, and B. Vucetic, \"Signal-to-noise ratio based physical layer authentication in uav communications,\" in 2023 IEEE 34th Annual International Symposium on Personal, Indoor and Mobile Radio Communications (PIMRC), 2023, pp. 1-6." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 372, + 563, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 372, + 563, + 406 + ], + "spans": [ + { + "bbox": [ + 310, + 372, + 563, + 406 + ], + "type": "text", + "content": "[172] Y. Shang, Y. Peng, R. Ye, and J. Lee, “Ris-assisted secure uav communication scheme against active jamming and passive eavesdropping,” IEEE Transactions on Intelligent Transportation Systems, vol. 25, no. 11, pp. 16953-16963, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 407, + 563, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 407, + 563, + 441 + ], + "spans": [ + { + "bbox": [ + 310, + 407, + 563, + 441 + ], + "type": "text", + "content": "[173] Y. Wu, X. Guan, W. Yang, and Q. Wu, “Uav swarm communication under malicious jamming: Joint trajectory and clustering design,” IEEE Wireless Communications Letters, vol. 10, no. 10, pp. 2264–2268, 2021." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 442, + 563, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 442, + 563, + 478 + ], + "spans": [ + { + "bbox": [ + 310, + 442, + 563, + 478 + ], + "type": "text", + "content": "[174] Z. Shen, K. Xu, and X. 
Xia, \"Beam-domain anti-jamming transmission for downlink massive mimo systems: A stackelberg game perspective,\" IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2727-2742, 2021." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 479, + 563, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 479, + 563, + 506 + ], + "spans": [ + { + "bbox": [ + 310, + 479, + 563, + 506 + ], + "type": "text", + "content": "[175] X. Li, J. Chen, X. Ling, and T. Wu, “Deep reinforcement learning-based anti-jamming algorithm using dual action network,” IEEE Transactions on Wireless Communications, vol. 22, no. 7, pp. 4625–4637, 2023." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 506, + 563, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 506, + 563, + 532 + ], + "spans": [ + { + "bbox": [ + 310, + 506, + 563, + 532 + ], + "type": "text", + "content": "[176] L. Jia, N. Qi, F. Chu, S. Fang, X. Wang, S. Ma, and S. Feng, \"Game-theoretic learning anti-jamming approaches in wireless networks,\" IEEE Communications Magazine, vol. 60, no. 5, pp. 60-66, 2022." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "spans": [ + { + "bbox": [ + 310, + 533, + 563, + 559 + ], + "type": "text", + "content": "[177] F. Yao and L. Jia, “A collaborative multi-agent reinforcement learning anti-jamming algorithm in wireless networks,” IEEE Wireless Communications Letters, vol. 8, no. 4, pp. 1024–1027, 2019." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 559, + 563, + 594 + ], + "type": "text", + "content": "[178] E. Schmidt, N. Gatsis, and D. 
Akopian, “A gps spoofing detection and classification correlator-based technique using the lasso,” IEEE Transactions on Aerospace and Electronic Systems, vol. 56, no. 6, pp. 4224–4237, 2020." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 594, + 563, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 594, + 563, + 622 + ], + "spans": [ + { + "bbox": [ + 310, + 594, + 563, + 622 + ], + "type": "text", + "content": "[179] B. Pardhasaradhi and L. R. Cenkeramaddi, \"Gps spoofing detection and mitigation for drones using distributed radar tracking and fusion,\" IEEE Sensors Journal, vol. 22, no. 11, pp. 11 122-11 134, 2022." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 622, + 563, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 622, + 563, + 648 + ], + "spans": [ + { + "bbox": [ + 310, + 622, + 563, + 648 + ], + "type": "text", + "content": "[180] Z. Chen, J. Li, J. Li, X. Zhu, and C. Li, \"Gnss multiparameter spoofing detection method based on support vector machine,\" IEEE Sensors Journal, vol. 22, no. 18, pp. 17864-17874, 2022." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 649, + 563, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 649, + 563, + 684 + ], + "spans": [ + { + "bbox": [ + 310, + 649, + 563, + 684 + ], + "type": "text", + "content": "[181] X. Chen, D. He, X. Yan, W. Yu, and T.-K. Truong, \"Gnss interference type recognition with fingerprint spectrum dnn method,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 58, no. 5, pp. 4745-4760, 2022." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 685, + 563, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 685, + 563, + 720 + ], + "spans": [ + { + "bbox": [ + 310, + 685, + 563, + 720 + ], + "type": "text", + "content": "[182] Y. Dang, C. Benzaïd, Y. Shen, and T. 
Taleb, \"Gps spoofing detector with adaptive trustable residence area for cellular based-uavs,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-6." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 720, + 563, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 720, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 720, + 563, + 747 + ], + "type": "text", + "content": "[183] V. Chandola, A. Banerjee, and V. Kumar, \"Anomaly detection: A survey,\" ACM Comput. Surv., vol. 41, no. 3, Jul. 2009. [Online]. Available: https://doi.org/10.1145/1541880.1541882" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 24, + 563, + 32 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "spans": [ + { + "bbox": [ + 47, + 57, + 301, + 83 + ], + "type": "text", + "content": "[184] B. Balaji and K. Friston, \"Bayesian state estimation using generalized coordinates,\" Signal processing, sensor fusion, and target recognition XX, vol. 8050, pp. 716-727, 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 85, + 301, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 85, + 301, + 121 + ], + "spans": [ + { + "bbox": [ + 47, + 85, + 301, + 121 + ], + "type": "text", + "content": "[185] M. Baydoun, D. Campo, V. Sanguineti, L. Marcenaro, A. Cavallaro, and C. 
Regazzoni, “Learning switching models for abnormality detection for autonomous driving,” in 2018 21st International Conference on Information Fusion (FUSION), 2018, pp. 2606–2613." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 121, + 301, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 121, + 301, + 139 + ], + "spans": [ + { + "bbox": [ + 47, + 121, + 301, + 139 + ], + "type": "text", + "content": "[186] L. Pardo, Statistical inference based on divergence measures. Chapman and Hall/CRC, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 140, + 301, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 140, + 301, + 176 + ], + "spans": [ + { + "bbox": [ + 47, + 140, + 301, + 176 + ], + "type": "text", + "content": "[187] A. Krayani, M. Baydoun, L. Marcenaro, A. S. Alam, and C. Regazzoni, \"Self-learning bayesian generative models for jammer detection in cognitive-uav-radios,\" in GLOBECOM 2020 - 2020 IEEE Global Communications Conference, 2020, pp. 1-7." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 177, + 301, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 177, + 301, + 213 + ], + "spans": [ + { + "bbox": [ + 47, + 177, + 301, + 213 + ], + "type": "text", + "content": "[188] W. Xie, G. Sun, J. Wang, H. Du, J. Kang, K. Huang, and V. Leung, “Multi-objective aerial iris-assisted isac optimization via generative ai-enhanced deep reinforcement learning,” arXiv preprint arXiv:2502.10687, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 213, + 301, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 213, + 301, + 241 + ], + "spans": [ + { + "bbox": [ + 47, + 213, + 301, + 241 + ], + "type": "text", + "content": "[189] J. Wang, H. Du, Y. Liu, G. Sun, D. Niyato, S. Mao, D. I. Kim, and X. 
Shen, \"Generative ai based secure wireless sensing for isac networks,\" arXiv preprint arXiv:2408.11398, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 242, + 301, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 242, + 301, + 269 + ], + "spans": [ + { + "bbox": [ + 47, + 242, + 301, + 269 + ], + "type": "text", + "content": "[190] X. Wang, C. P. Tan, Y. Wang, and X. Wang, “Defending uav networks against covert attacks using auxiliary signal injections,” IEEE Transactions on Automation Science and Engineering, pp. 1–13, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 270, + 301, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 270, + 301, + 304 + ], + "spans": [ + { + "bbox": [ + 47, + 270, + 301, + 304 + ], + "type": "text", + "content": "[191] M. Valkama, M. Renfors, and V. Koivunen, “Advanced methods for i/q imbalance compensation in communication receivers,” IEEE Transactions on Signal Processing, vol. 49, no. 10, pp. 2335–2344, 2001." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 306, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 306, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 306, + 301, + 342 + ], + "type": "text", + "content": "[192] J. Zhang and Y. R. Zheng, \"Frequency-domain turbo equalization with soft successive interference cancellation for single carrier mimo underwater acoustic communications,\" IEEE Transactions on Wireless Communications, vol. 10, no. 9, pp. 2872-2882, 2011." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 343, + 301, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 343, + 301, + 379 + ], + "spans": [ + { + "bbox": [ + 47, + 343, + 301, + 379 + ], + "type": "text", + "content": "[193] P. Madhani, P. Axelrad, K. Krumvieda, and J. 
Thomas, \"Application of successive interference cancellation to the gps pseudolite near-far problem,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 39, no. 2, pp. 481-488, 2003." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "spans": [ + { + "bbox": [ + 47, + 380, + 301, + 407 + ], + "type": "text", + "content": "[194] P. Patel and J. Holtzman, \"Analysis of a simple successive interference cancellation scheme in a ds/cdma system,\" IEEE Journal on Selected Areas in Communications, vol. 12, no. 5, pp. 796-807, 1994." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 407, + 301, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 407, + 301, + 426 + ], + "spans": [ + { + "bbox": [ + 47, + 407, + 301, + 426 + ], + "type": "text", + "content": "[195] M. L. Psiaki and T. E. Humphreys, “Gnss spoofing and detection,” Proceedings of the IEEE, vol. 104, no. 6, pp. 1258–1270, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 426, + 301, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 426, + 301, + 453 + ], + "spans": [ + { + "bbox": [ + 47, + 426, + 301, + 453 + ], + "type": "text", + "content": "[196] T. E. Humphreys, “Detection strategy for cryptographic gnss anti-spoofing,” IEEE Transactions on Aerospace and Electronic Systems, vol. 49, no. 2, pp. 1073–1090, 2013." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 454, + 301, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 454, + 301, + 489 + ], + "spans": [ + { + "bbox": [ + 47, + 454, + 301, + 489 + ], + "type": "text", + "content": "[197] Z. Wu, R. Liu, and H. Cao, \"Ecdsa-based message authentication scheme for beidou-ii navigation satellite system,\" IEEE Transactions on Aerospace and Electronic Systems, vol. 55, no. 4, pp. 
1666-1682, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 491, + 301, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 491, + 301, + 518 + ], + "spans": [ + { + "bbox": [ + 47, + 491, + 301, + 518 + ], + "type": "text", + "content": "[198] K. Wesson, M. Rothlisberger, and T. Humphreys, “Practical cryptographic civilgps signal authentication,” NAVIGATION: Journal of the Institute of Navigation, vol. 59, no. 3, pp. 177–193, 2012." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 519, + 301, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 519, + 301, + 572 + ], + "spans": [ + { + "bbox": [ + 47, + 519, + 301, + 572 + ], + "type": "text", + "content": "[199] A. Ranganathan, H. Olafsdóttir, and S. Capkun, \"Spree: a spoofing resistant gps receiver,\" in Proceedings of the 22nd Annual International Conference on Mobile Computing and Networking, ser. MobiCom '16. New York, NY, USA: Association for Computing Machinery, 2016, p. 348-360. [Online]. Available: https://doi.org/10.1145/2973750.2973753" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 574, + 301, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 301, + 609 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 301, + 609 + ], + "type": "text", + "content": "[200] M. Ahmed, A. A. Soofi, S. Raza, F. Khan, S. Ahmad, W. U. Khan, M. Asif, F. Xu, and Z. Han, “Advancements in ris-assisted UAV for empowering multiaccess edge computing: A survey,” IEEE Internet of Things Journal, vol. 12, no. 6, pp. 6325–6346, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 610, + 301, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 610, + 301, + 637 + ], + "spans": [ + { + "bbox": [ + 47, + 610, + 301, + 637 + ], + "type": "text", + "content": "[201] G. K. Pandey, D. S. Gurjar, S. Yadav, Y. Jiang, and C. 
Yuen, “Uav-assisted communications with rf energy harvesting: A comprehensive survey,” IEEE Communications Surveys & Tutorials, pp. 1-1, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 638, + 301, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 638, + 301, + 683 + ], + "spans": [ + { + "bbox": [ + 47, + 638, + 301, + 683 + ], + "type": "text", + "content": "[202] P. Cao, L. Lei, S. Cai, G. Shen, X. Liu, X. Wang, L. Zhang, L. Zhou, and M. Guizani, \"Computational intelligence algorithms for uav swarm networking and collaboration: A comprehensive survey and future directions,\" IEEE Communications Surveys & Tutorials, vol. 26, no. 4, pp. 2684-2728, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 684, + 301, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 684, + 301, + 720 + ], + "spans": [ + { + "bbox": [ + 47, + 684, + 301, + 720 + ], + "type": "text", + "content": "[203] P. Li, H. Zhang, Y. Wu, L. Qian, R. Yu, D. Niyato, and X. Shen, \"Filling the missing: Exploring generative ai for enhanced federated learning over heterogeneous mobile edge devices,\" IEEE Transactions on Mobile Computing, vol. 23, no. 10, pp. 10001-10015, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 720, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 720, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 720, + 301, + 747 + ], + "type": "text", + "content": "[204] J. Wang, Y. Liu, H. Du, D. Niyato, J. Kang, H. Zhou, and D. I. Kim, \"Empowering wireless networks with artificial intelligence generated graph,\" arXiv preprint arXiv:2405.04907, 2024." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 57, + 563, + 289 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 310, + 57, + 563, + 92 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 57, + 563, + 92 + ], + "spans": [ + { + "bbox": [ + 310, + 57, + 563, + 92 + ], + "type": "text", + "content": "[205] M. Xu, D. Niyato, J. Kang, Z. Xiong, S. Mao, Z. Han, D. I. Kim, and K. B. Letaief, \"When large language model agents meet 6g networks: Perception, grounding, and alignment,\" IEEE Wireless Communications, vol. 31, no. 6, pp. 63-71, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 563, + 128 + ], + "type": "text", + "content": "[206] R. Zhang, H. Du, D. Niyato, J. Kang, Z. Xiong, P. Zhang, and D. I. Kim, \"Optimizing generative ai networking: A dual perspective with multi-agent systems and mixture of experts,\" arXiv preprint arXiv:2405.12472, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "spans": [ + { + "bbox": [ + 310, + 129, + 563, + 163 + ], + "type": "text", + "content": "[207] A. H. Arani, P. Hu, and Y. Zhu, “Uav-assisted space-air-ground integrated networks: A technical review of recent learning algorithms,” IEEE Open Journal of Vehicular Technology, vol. 5, pp. 1004–1023, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 164, + 563, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 164, + 563, + 209 + ], + "spans": [ + { + "bbox": [ + 310, + 164, + 563, + 209 + ], + "type": "text", + "content": "[208] N. T. T. Van, N. L. Tuan, N. C. Luong, T. H. Nguyen, S. Feng, S. Gong, D. 
Niyato, and D. I. Kim, \"Network access selection for urclc and embb applications in sub-6ghz-mmwave-thz networks: Game theory versus multi-agent reinforcement learning,\" IEEE Transactions on Communications, pp. 1-1, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "spans": [ + { + "bbox": [ + 310, + 209, + 563, + 245 + ], + "type": "text", + "content": "[209] Q. Yuan, L. Xiao, C. He, P. Xiao, and T. Jiang, \"Deep learning-based hybrid precoding for ris-aided broadband terahertz communication systems in the face of beam squint,\" IEEE Wireless Communications Letters, vol. 13, no. 2, pp. 303-307, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 246, + 563, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 246, + 563, + 289 + ], + "spans": [ + { + "bbox": [ + 310, + 246, + 563, + 289 + ], + "type": "text", + "content": "[210] G. Geraci, A. Garcia-Rodriguez, M. M. Azari, A. Lozano, M. Mezzavilla, S. Chatzinotas, Y. Chen, S. Rangan, and M. D. Renzo, \"What will the future of uav cellular communications be? a flight from 5g to 6g,\" IEEE Communications Surveys & Tutorials, vol. 24, no. 3, pp. 1304-1335, 2022." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 554, + 25, + 563, + 32 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_content_list.json b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ef45d1d52de48cfe3dda71784a1d66683e786475 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_content_list.json @@ -0,0 +1,1219 @@ +[ + { + "type": "text", + "text": "Learning Occlusion-Robust Vision Transformers for Real-Time UAV Tracking", + "text_level": 1, + "bbox": [ + 104, + 128, + 890, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "You Wu $^{1\\dagger}$ , Xucheng Wang $^{2\\dagger}$ , Xiangyang Yang $^{1}$ , Mengyuan Liu $^{1}$ , Dan Zeng $^{3}$ , Hengzhou Ye $^{1}$ , Shuiwang Li $^{1*}$", + "bbox": [ + 240, + 179, + 754, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1College of Computer Science and Engineering, Guilin University of Technology, China", + "bbox": [ + 150, + 215, + 844, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ School of Computer Science, Fudan University, Shanghai, China", + "bbox": [ + 238, + 233, + 759, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ School of Artificial Intelligence, Sun Yat-sen University, Zhuhai, China", + "bbox": [ + 210, + 251, + 784, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "wuyou@glut.edu.cn, 
xcwang317@glut.edu.cn, xyyang317@163.com, mengyuaner1122@foxmail.com, zengd8@mail.sysu.edu.cn, yehengzhou@glut.edu.cn, lishuiwang0721@163.com", + "bbox": [ + 107, + 272, + 883, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 339, + 326, + 353 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Single-stream architectures using Vision Transformer (ViT) backbones show great potential for real-time UAV tracking recently. However, frequent occlusions from obstacles like buildings and trees expose a major drawback: these models often lack strategies to handle occlusions effectively. New methods are needed to enhance the occlusion resilience of single-stream ViT models in aerial tracking. In this work, we propose to learn Occlusion-Robust Representations (ORR) based on ViTs for UAV tracking by enforcing an invariance of the feature representation of a target with respect to random masking operations modeled by a spatial Cox process. Hopefully, this random masking approximately simulates target occlusions, thereby enabling us to learn ViTs that are robust to target occlusion for UAV tracking. This framework is termed ORTrack. Additionally, to facilitate real-time applications, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to create a more compact tracker, which adaptively mimics the behavior of the teacher model ORTrack according to the task's difficulty. This student model, dubbed ORTrack-D, retains much of ORTrack's performance while offering higher efficiency. Extensive experiments on multiple benchmarks validate the effectiveness of our method, demonstrating its state-of-the-art performance. Codes is available at https://github.com/wuyou3474/ORTrack.", + "bbox": [ + 89, + 369, + 483, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 89, + 776, + 220, + 791 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Unmanned aerial vehicles (UAVs) are leveraged in a plethora of applications, with increasing emphasis on UAV tracking [4, 43, 46, 49, 52, 79, 84]. This form of tracking poses an exclusive set of challenges such as tricky viewing angles, motion blur, severe occlusions, and the need for", + "bbox": [ + 89, + 801, + 483, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg", + "image_caption": [ + "Figure 1. Compared to SOTA UAV trackers on UAVDT, our ORTrack-DeiT sets a new record with $83.4\\%$ precision and a speed of 236 FPS. Our ORTrack-D-DeiT strikes a better trade-off with $82.5\\%$ precision and a speed of about 313 FPS." + ], + "image_footnote": [], + "bbox": [ + 516, + 338, + 906, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "efficiency due to UAVs' restricted battery life and computational resources [5, 42, 80, 83]. Consequently, designing an effective UAV tracker requires a delicate balance between precision and efficiency. It needs to ensure accuracy while being conscious of the UAV's energy and computational constraints.", + "bbox": [ + 511, + 623, + 906, + 713 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, there has been a notable shift from discriminative correlation filters (DCF)-based methods, because of their unsatisfactory robustness, towards DL-based approaches, particularly with the adoption of single-stream architectures that integrate feature extraction and fusion via pre-trained Vision Transformer (ViT) backbone networks. This single-stream paradigm has proven highly effective in generic visual tracking, as evidenced by the success of recent methods such as OSTrack [91], SimTrack [8], Mixformer [13], and DropMAE [82]. 
Building on these advancements, Aba-VTrack [44] introduces a lightweight DL-based tracker within this framework, employing an adap", + "bbox": [ + 511, + 719, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09228v1 [cs.CV] 12 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Equal contribution. * Corresponding authors.", + "bbox": [ + 114, + 886, + 379, + 900 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tive and background-aware token computation method to enhance inference speed, which demonstrates remarkable precision and speed for real-time UAV tracking. However, the use of a variable number of tokens in Aba-VTrack incurs significant time costs, primarily due to the unstructured access operations required during inference. Adding to this, it also grappled with establishing robustness when facing target occlusion, a challenge common in UAV tracking often triggered by obstructive elements like buildings, mountains, trees, and so forth. The problem is exacerbated by the fact that UAVs may not always be capable of circumventing these impediments due to potential large-scale movements involved.", + "bbox": [ + 93, + 90, + 480, + 285 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these issues, we introduce a novel framework designed to enhance the occlusion robustness of ViTs for UAV tracking. Our approach, termed ORTrack, aims to learn ViT-based trackers that maintain robust feature representations even in the presence of target occlusion. This is achieved by enforcing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process. The random masking serves as a simulation of target occlusion, which is expected to mimic real occlusion challenges in UAV tracking and aid in learning Occlusion-Robust Representations (ORR). 
Notably, our method for learning occlusion-robust representation simply uses a Mean Squared Error (MSE) loss during training, adding no extra computational load during inference. Additionally, to enhance efficiency for real-time applications, we introduce an Adaptive Feature-Based Knowledge Distillation (AFKD) method. This method creates a more compact tracker, named ORTrack-D, which adaptively mimics the behavior of the teacher model ORTrack based on the complexity of the tracking task during training. The reasoning is that the teacher model, in its pursuit of powerful representations, may compromise its generalizability. Hence, in situations where generalizability is vital, the student model may perform better, and closely mimicking the teacher's behavior becomes less important. We use the deviation of GIoU loss [67] from its average value to quantify the difficulty of the tracking task, which makes sense as loss value is a commonly used criteria to define hard samples [70, 74, 77]. ORTrack-D maintains much of ORTrack's performance with higher efficiency, making it better suited for deployment in resource-constrained environments typical of UAV applications. 
Extensive experiments on four benchmarks show that our method achieves state-of-the-art performance.", + "bbox": [ + 93, + 291, + 480, + 805 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are as follows: (i) We propose to learn Occlusion-Robust Representations (ORR) by imposing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process, which can be easily integrated into other tracking frameworks without requiring additional ar", + "bbox": [ + 93, + 810, + 480, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "chitectures or increasing inference time; (ii) We propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to further enhance efficiency, in which the student model adaptively mimics the behavior of the teacher model according to the task's difficulty, resulting in a significant increase in tracking speed while only minimally reducing accuracy; (iii) We introduce ORTrack, a family of efficient trackers based on these components, which integrates seamlessly with other ViT-based trackers. ORTrack demonstrates superior performance while maintaining extremely fast tracking speeds. Extensive evaluations show that ORTrack achieves state-of-the-art real-time performance.", + "bbox": [ + 517, + 90, + 903, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related work", + "text_level": 1, + "bbox": [ + 517, + 287, + 648, + 301 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Visual Tracking.", + "text_level": 1, + "bbox": [ + 517, + 313, + 673, + 328 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In visual tracking, the primary approaches consist of DCF-based and DL-based trackers. DCF-based trackers are favored for UAV tracking due to their remarkable efficiency, but they face difficulties in maintaining robustness under complex conditions [31, 42, 46]. 
Recently developed lightweight DL-based trackers have improved tracking precision and robustness for UAV tracking [4, 5]; however, their efficiency lags behind that of most DCF-based trackers. Model compression techniques like those in [80, 83] have been used to further boost efficiency, yet these trackers still face issues with tracking precision. Vision Transformers (ViTs) are gaining traction for streamlining and unifying frameworks in visual tracking, as seen in studies like [13, 85, 86, 89, 91]. While these frameworks are compact and efficient, few are based on lightweight ViTs, making them impractical for real-time UAV tracking. To address this, Aba-ViTrack [44] used lightweight ViTs and an adaptive, background-aware token computation method to enhance efficiency for real-time UAV tracking. However, the variable token number in this approach necessitates unstructured access operations, leading to significant time costs. In this work, we aim to improve the efficiency of ViTs for UAV tracking through knowledge distillation, a more structured method.", + "bbox": [ + 517, + 337, + 903, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Occlusion-Robust Feature Representation.", + "text_level": 1, + "bbox": [ + 517, + 710, + 870, + 726 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Occlusion-robust feature representation is crucial in computer vision and image processing. It involves developing methods that can recognize and process objects in images even when parts are hidden or occluded [62, 76]. Early efforts often relied on handcrafted features, active appearance models, motion analysis, sensor fusion, etc [7, 33, 51, 71]. While effective in some cases, these methods struggled with the complexity and variability of real-world visual data. The advent of deep learning revolutionized the field. 
Many studies have applied Convolutional Neural Networks (CNNs) and other deep architectures to", + "bbox": [ + 517, + 734, + 903, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "extract occlusion-robust representations [35, 62, 66, 76]. These approaches use deep models to capture complex patterns and variations in visual data, making learned features resilient to occlusions and having proven valuable for many computer vision applications, such as action recognition [17, 88], pose estimation [62, 95], and object detection [12, 36]. The exploration of occlusion-robust representations in visual tracking has also demonstrated great success [1, 6, 27, 34, 39, 58, 59, 61, 94]. However, to our knowledge, there is a dearth of research to explore learning occlusion-robust ViTs particularly in a unified framework for UAV tracking. In this study, we delve into the exploration of learning occlusion-robust feature representations based on ViTs by simulating occlusion challenges using random masking modeled by a spatial Cox process, specifically tailored for UAV tracking. This study represents the first use of ViTs for acquiring occlusion-robust feature representations in UAV tracking.", + "bbox": [ + 89, + 90, + 480, + 363 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Knowledge Distillation.", + "text_level": 1, + "bbox": [ + 89, + 371, + 307, + 386 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Knowledge distillation is a technique used to compress models by transferring knowledge from a complex \"teacher\" model to a simpler \"student\" model, with the aim of maintaining performance while reducing computational resources and memory usage [63, 75]. It involves various types of knowledge, distillation strategies, and teacher-student architectures, typically falling into three categories: response-based, feature-based, and relation-based distillation [26, 63, 78]. 
Widely applied in tasks such as image classification [64], object detection [9], and neural machine translation [42], it offers potential to improve the efficiency and even effectiveness of deep learning models. Recently, it has been successfully utilized to enhance the efficiency of DL-based trackers. For instance, Li et al. [41] used mask-guided self-distillation to compress Siamese-based visual trackers. Sun et al. [72] introduced a lightweight dual Siamese tracker for hyperspectral object tracking, using a spatial-spectral knowledge distillation method to learn from a deep tracker. However, these techniques are mainly Siamese-based and tailored to specific tracking frameworks, posing challenges for adaptation to our ViT-based approach. In this study, we propose a simple yet effective feature-based knowledge distillation method, in which the student adaptively replicate the behavior of the teacher based on the complexity of the tracking task during training.", + "bbox": [ + 89, + 393, + 482, + 772 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3. Method", + "text_level": 1, + "bbox": [ + 89, + 784, + 181, + 799 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we first provide a brief overview of our end-to-end tracking framework, named ORTrack, as shown in Figure 2. Then, we introduce the occlusion-robust representation learning based on spatial Cox processes and the method of adaptive knowledge distillation. Finally, we detail the prediction head and training loss.", + "bbox": [ + 89, + 809, + 480, + 901 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1. Overview", + "text_level": 1, + "bbox": [ + 513, + 90, + 624, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proposed ORTrack introduces an novel single-stream tracking framework, featuring a spatial Cox process-based masking for occlusion-robust representation learning and an adaptive feature-based knowledge distillation pipeline. 
ORTrack consists of two sequential training phases: the teacher model training pipeline for learning occlusion-robust representations, followed by the student training pipeline involving adaptive knowledge distillation. In the teacher model training phase, the input includes a target template $Z \\in \\mathbb{R}^{3 \\times H_z \\times W_z}$ of spatial size $H_z \\times W_z$ , a randomly masked target template $Z' = \\mathfrak{m}(Z)$ , and a search image $X \\in \\mathbb{R}^{3 \\times H_x \\times W_x}$ of spatial size $H_x \\times W_x$ , where $\\mathfrak{m}(\\cdot)$ represents the random masking operation that masks out non-overlap patches of size $b \\times b$ with a certain masking ratio $\\sigma$ . To achieve occlusion-robust representation with ViTs, we minimize the mean squared error (MSE) between two versions of the template representation: one with random masking and one without. During the training of the student model, the teacher's weights remain fixed while both the teacher and student models receive inputs $Z$ and $X$ . Let $\\mathfrak{B}_T$ and $\\mathfrak{B}_S$ represent the backbones of the teacher and student, respectively. In our implementation, $\\mathfrak{B}_T$ and $\\mathfrak{B}_S$ share the same structure of the ViT layer but differ in the number of layers. Feature-based knowledge distillation is used to transfer the knowledge embedded in the teacher model's backbone features to the student model through an adaptive distillation loss.", + "bbox": [ + 511, + 113, + 906, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Occlusion-Robust Representations (ORR) Based on Spatial Cox Processes", + "text_level": 1, + "bbox": [ + 511, + 530, + 903, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To begin, we describe two random masking operations used to simulate occlusion challenges: one from MAE [28] and our proposed method based on a Spatial Cox process, denoted by $\\mathfrak{m}_{\\mathrm{U}}$ and $\\mathfrak{m}_{\\mathrm{C}}$ , respectively. Although $\\mathfrak{m}_{\\mathrm{U}}$ allows the model to learn robust representations that are less sensitive to noise or missing information by randomly ignoring certain parts of the input data during training [28], it is less effective when used to simulate occlusion since each spatial position (in the sense of block size) is masked out with equal probability, especially in our situation where the target template generally contains background. To ensure that the target is masked out as expected with higher probabilities at a given masking ratio, thereby making the occlusion simulation more effective, we employ a finite Cox process [32] to model this masking operation, which is detailed as follows.", + "bbox": [ + 511, + 568, + 906, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Define two associated random matrices $\\mathbf{m} = (m_{i,j})$ , $\\mathbf{b} = (b_{i,j})$ , $1 \\leqslant i \\leqslant H_z / b$ , $1 \\leqslant j \\leqslant W_z / b$ , where $m_{i,j} \\sim \\mathcal{U}(0,1)$ (i.e., $m_{i,j}$ follows a uniform distribution over the interval [0, 1]), $b_{i,j} \\in \\{0,1\\}$ equals 1 if $m_{i,j} \\in \\mathrm{TopK}(\\mathbf{m}, K)$ , and 0 otherwise. 
$\\mathrm{TopK}(\\mathbf{m}, K)$ returns the $K = \\lfloor (1 - \\sigma)H_zW_z \\rfloor$ largest elements from $\\mathbf{m}$ , where $\\lfloor x \\rfloor$ rounds $x$ to", + "bbox": [ + 511, + 810, + 908, + 901 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg", + "image_caption": [ + "Figure 2. Overview of the proposed ORTrack framework, which includes separate training pipelines for a teacher and a student model. Note that the spatial Cox process-based masking and occlusion-robust representation learning are applied only in the teacher pipeline. Once the teacher is trained, its weights are fixed for training the student model with the proposed adaptive knowledge distillation." + ], + "image_footnote": [], + "bbox": [ + 178, + 85, + 831, + 333 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the nearest integer. Mathematically, $\\mathfrak{m}_{\\mathrm{U}}(Z) = Z\\odot (\\mathbf{b}\\otimes \\mathbf{1})$ where $\\odot$ denotes the Hadamard product and $\\otimes$ denotes the tensor product, $\\mathbf{1}$ is an all-ones matrix of size $b\\times b$ . Before defining $\\mathfrak{m}_{\\mathrm{C}}$ , we establish core notations relevant to spatial Cox processes. It extends the concept of spatial inhomogeneous Poisson point processes by incorporating a random intensity function, which, in turn, is defined as a Poisson point process with an intensity determined by a location-dependent function in the underlying space. 
For Euclidean space $\\mathbb{R}^2$ , an inhomogeneous Poisson point process is defined by a locally integrable positive intensity function $\\lambda \\colon \\mathbb{R}^2\\to [0,\\infty)$ , such that for every bounded region $\\mathcal{B}$ the integral $\\Lambda (\\mathcal{B}) = \\int_{\\mathcal{B}}\\lambda (x,y)\\mathrm{d}xdy$ is finite, where $\\Lambda (\\mathcal{B})$ has the interpretation of being the expected number of points of the Poisson process located in $\\mathcal{B}$ , and for every collection of disjoint bounded Borel measurable sets $\\mathcal{B}_1,\\dots,\\mathcal{B}_k$ [60], its number distributions is defined by $\\operatorname*{Pr}\\{\\mathrm{N}(\\mathcal{B}_i) = n_i,i = 1,\\ldots ,k\\} = \\prod_{i = 1}^{k}\\frac{(\\Lambda(\\mathcal{B}_i))^{n_i}}{n_i!} e^{-\\Lambda (\\mathcal{B}_i)}$ , $n_i\\in \\mathbb{Z}^{0 + }$ , where $\\operatorname*{Pr}$ denotes the probability measure, $\\mathrm{N}$ indicates the random counting measure such that $\\Lambda (\\mathcal{B}) = \\mathbb{E}[\\mathrm{N}(\\mathcal{B})]$ , $\\mathbb{E}$ is the expectation operator. In particular, the conditional distribution of the points in a bounded set $\\mathcal{B}$ given that $\\mathrm{N}(\\mathcal{B}) = n\\in \\mathbb{Z}^{0 + }$ is not uniform, and $f_{n}(p_{1},\\dots,p_{n}) = \\prod_{n}^{i = 1}\\frac{\\lambda(p_{i})}{\\Lambda(\\mathcal{B})}$ , $p_1,\\dots,p_n\\in \\mathcal{B}$ defines the corresponding location density function of the $n$ points. Since a Cox process can be regarded as the result of a two-stage random mechanism for which it is sometimes termed 'doubly stochastic Poisson process' [32], the finite Cox processes can be simulated in a straightforward way based on the hierarchical nature of the model. Specifically, in the first step, the intensity $\\lambda (x,y)$ is generated. 
In the second step, an in", + "bbox": [ + 91, + 412, + 485, + 888 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "homogeneous Poisson point process is simulated using the generated $\\lambda(x, y)$ [32, 53]. The thinning algorithm [11] is used here for simulating inhomogeneous Poisson point processes. It involves simulating a homogeneous Poisson point process with a higher rate than the maximum possible rate of the inhomogeneous process, and then \"thinning\" out the generated points to match the desired intensity function.", + "bbox": [ + 511, + 412, + 906, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this work, the randomness of the intensity function is modeled by a random variable $\\Gamma$ that has a Poisson distribution with expectation of $\\varsigma$ , namely, $\\operatorname{Pr}\\{\\Gamma = k\\} = \\frac{\\varsigma^k e^{-\\varsigma}}{k!}$ where $k \\in \\mathbb{Z}^{0+}$ . The intensity function of the inhomogeneous Poisson point process is then given by", + "bbox": [ + 511, + 518, + 908, + 597 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\lambda (x, y) = \\frac {\\Gamma e ^ {- (x ^ {2} + y ^ {2})}}{\\int_ {\\mathcal {B}} e ^ {- (x ^ {2} + y ^ {2})} d x d y}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 614, + 602, + 906, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that $\\lambda(x, y)$ is a bell-shape function that gives more intensities to the central area of $\\mathcal{B}$ . Let $\\mathcal{B}$ denote the rectangle region of size $H_z / b \\times W_z / b$ representing the template region. 
If we simulate the Cox process within $\\mathcal{B}$ and denote a resulted point pattern by $\\Xi$ , we can obtain a matrix $\\mathbf{b}' = (b_{i,j}')_{1 \\leqslant i \\leqslant H_z / b, 1 \\leqslant i \\leqslant W_z / b}$ , where $b_{i,j}'$ equals 1 if $(i, j) \\in \\Xi$ , and 0 otherwise, with which our $\\mathfrak{m}_{\\mathbb{C}}$ can be defined as $\\mathfrak{m}_{\\mathbb{C}}(Z) = Z \\odot (\\mathbf{b}' \\otimes \\mathbf{1})$ . It is worthy of note that if $\\varsigma = [(1 - \\sigma)H_zW_z]$ , since $\\mathbb{E}[\\Lambda(\\mathcal{B})] = \\mathbb{E}[\\int_{\\mathcal{B}} \\lambda(x, y) dxdy] = \\mathbb{E}[\\Gamma] = \\varsigma$ , in this case, the expected masking ratio of our masking operation is equal to the masking ratio of $\\mathfrak{m}_{\\mathbb{C}}$ . Thus, in addition to inhomogeneous intensity, our method can simulate more diverse pattern of occlusion due to the introduced randomness of the masking ratio.", + "bbox": [ + 511, + 643, + 908, + 868 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We denote the total number of tokens by $\\mathcal{K}$ , the embedding dimension of each token by $d$ , and all the tokens out", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "put by the $L$ -th layer of $\\mathfrak{B}_T$ with respect to inputs $X$ and $Z$ by $\\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)\\in \\mathbb{R}^{\\mathcal{K}\\times d}$ . Let $\\mathbf{t}_{\\mathcal{K}_Z\\cup \\mathcal{K}_X}^L (Z,X;\\mathfrak{B}_T) = \\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)$ , where $\\mathcal{K}_Z\\cup \\mathcal{K}_X = [1,\\mathcal{K}]$ , $\\mathbf{t}_{\\mathcal{K}_Z}^L$ and $\\mathbf{t}_{\\mathcal{K}_X}^L$ represent the tokens corresponding to the template and the search image, respectively. By the same token, the output tokens corresponding to inputs $X$ and $Z'$ are $\\mathbf{t}_{1:\\mathcal{K}}^L (Z',X;\\mathfrak{B}_T)$ . 
The feature representations of $Z$ and $Z'$ can be recovered by tracking their token indices in respective ordered sequences, which specifically are $t_{1:\\mathcal{K}_z}^L (Z,X;\\mathfrak{B}_T)$ and $t_{1:\\mathcal{K}_z}^L (Z',X;\\mathfrak{B}_T)$ , respectively. The core idea of our occlusion-robust representations learning is that the mean square error between the feature representation of $Z$ and that of $Z'$ is minimized, which is implemented by minimizing the following MSE loss,", + "bbox": [ + 89, + 90, + 483, + 305 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {o r r} = \\left\\| t _ {1: \\mathcal {K} _ {z}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K} _ {z}} ^ {L} \\left(Z ^ {\\prime}, X; \\mathfrak {B} _ {T}\\right) \\right\\| ^ {2}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 130, + 313, + 482, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "During inference, only $[Z,X]$ is input to the model without the need for random template masking. Consequently, our method incurs no additional computational cost during inference. Notably, our method is independent of the ViTs used, any efficient ViTs can work in our framework.", + "bbox": [ + 89, + 339, + 483, + 415 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3. Adaptive Feature-Based Knowledge Distillation (AFKD)", + "text_level": 1, + "bbox": [ + 89, + 424, + 483, + 455 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Feature-based knowledge distillation is a technique in machine learning that trains a smaller student model to mimic a larger teacher model, which, instead of focusing only on final outputs, transfers intermediate features or representations from the teacher to the student [26, 78]. This method uses the detailed internal representations from the teacher model to improve the student's learning process. 
However, there is a risk that the student model might overfit to the specific features of the teacher model, rather than generalizing well to new data. This can be particularly problematic if the teacher model has learned spurious correlations in the data. To combat this, we propose adaptively transferring knowledge based on the difficulty of the tracking task. We quantify this difficulty using the deviation of the GIoU loss [67] (see Section 3.4) from its average value, calculated between the student's prediction and the ground truth. Adapting knowledge transfer based on difficulty ensures that the student model doesn't heavily adjust its weights on easy tasks, which it can probably already handle due to its generalizability. Instead, it focuses more on challenging scenarios where its feature representation is less effective.",
Given MSE's popularity in feature-based knowledge distillation and to avoid potential complexity associated with using multiple layers, we employ MSE to penalize differences between the output feature representations of both the teacher and student model's backbones, i.e., $t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{T})$ and $t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{S})$ . The proposed adaptive knowledge distillation loss is defined by", + "bbox": [ + 511, + 90, + 906, + 227 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {a f k d} = (\\alpha + \\beta \\left(\\mathcal {L} _ {i o u} - \\overline {{\\mathcal {L} _ {i o u}}}\\right)) \\| t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {S}) \\| ^ {2}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 511, + 234, + 929, + 265 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\alpha + \\beta (\\mathcal{L}_{iou} - \\overline{\\mathcal{L}_{iou}}) \\coloneqq \\varpi (\\mathcal{L}_{iou}; \\alpha, \\beta)$ is a function of the deviation of GIoU loss from its average, with slop $\\alpha$ and intercept $\\beta$ , used to quantify the difficulty of the tracking task.", + "bbox": [ + 511, + 265, + 906, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4. Prediction Head and Training Loss", + "text_level": 1, + "bbox": [ + 511, + 335, + 818, + 351 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Following the corner detection head in [13, 91], we use a prediction head consisting of multiple Conv-BN-ReLU layers to directly estimate the bounding box of the target. The output tokens corresponding to the search image are first reinterpreted to a 2D spatial feature map and then fed into the prediction head. 
The head outputs a local offset $\\mathbf{o} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}$ , a normalized bounding box size $\\mathbf{s} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}$ , and a target classification score $\\mathbf{p} \\in [0,1]^{H_x / P \\times W_x / P}$ as prediction outcomes. The initial estimation of the target position depends on identifying the location with the highest classification score, i.e., $(x_c, y_c) = \\operatorname{argmax}_{(x,y)} \\mathbf{p}(x,y)$ . The final target bounding box is estimated by $\\{(x_t, y_t); (w,h)\\} = \\{(x_c, y_c) + \\mathbf{o}(x_c, y_c); \\mathbf{s}(x_c, y_c)\\}$ . For the tracking task, we adopt the weighted focal loss [40] for classification, and a combination of $L_1$ loss and GIoU loss [67] for bounding box regression. The total loss for tracking prediction is:",
Experiments", + "text_level": 1, + "bbox": [ + 511, + 784, + 645, + 801 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate our method on four UAV tracking benchmarks: DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. All experiments run on a PC with an i9-10850K processor, 16GB RAM, and an NVIDIA TitanX GPU. We compare our method against 26 state-of-the-art trackers, using their official codes and hyper", + "bbox": [ + 511, + 809, + 906, + 902 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/e108445084f2db53ba04ebd300c9ae730732181ffe90090330cd8f3bc9c758c5.jpg", + "table_caption": [ + "Table 1. Precision (Prec.), success rate (Succ.), and speed (FPS) comparison between ORTrack and lightweight trackers on four UAV tracking benchmarks, i.e., DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. Red, blue and green indicate the first, second and third place. Note that the percent symbol (\\%) is omitted for all Prec. and Succ. values." + ], + "table_footnote": [], + "table_body": "
MethodSourceDTB70UAVDTVisDrone2018UAV123Avg.Avg.FPSFLOPs (GMac)Param. (M)
Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.GPUCPU
DCF-basedKCF [29]TAPMI 1546.828.057.129.068.541.352.333.156.232.9-624.3--
fDSST [16]TPAMI 1753.435.766.638.369.851.058.340.562.041.4-193.4--
ECO_HC [14]CVPR 1763.544.869.441.680.858.171.049.671.248.5-83.5--
AutoTrack [46]CVPR 2071.647.871.845.078.857.368.947.272.849.3-57.8--
RACF [42]PR 2272.650.577.349.483.460.070.247.775.951.8-35.6--
CNN-basedHiFT [4]ICCV 2180.259.465.247.571.952.678.759.074.054.6160.3-7.29.9
TCTrack [5]CVPR 2281.262.272.553.079.959.480.060.578.458.8149.6-8.89.7
SGDViT [90]ICRA 2378.560.465.748.072.152.175.457.572.954.5110.5-11.323.3
DRCI [93]ICME 2381.461.884.059.083.460.076.759.781.460.1281.362.73.68.8
PRL-Track [22]IROS 2479.560.673.153.572.653.879.159.376.156.8132.3-7.412.0
VIT-basedAba-ViTrack [44]ICCV 2385.966.483.459.986.165.386.466.485.564.5181.550.32.48.0
SMAT [25]WACV 2481.963.880.858.782.563.481.864.681.862.6126.8-3.28.6
AVTrack-DeiT [47]ICML 2484.365.082.158.786.065.384.866.884.263.8260.359.80.97-1.93.5-7.9
ORTrack-DeiTOurs86.266.483.460.188.666.884.366.485.665.0226.455.42.47.9
ORTrack-D-DeiT83.765.182.559.784.663.984.066.183.763.7292.364.71.55.3
", + "bbox": [ + 127, + 142, + 870, + 325 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "parameters. We evaluate our approach against 13 state-of-the-art (SOTA) lightweight trackers (see Table 1) and 14 SOTA deep trackers designed specifically for generic visual tracking (refer to Table 2).", + "bbox": [ + 89, + 351, + 482, + 411 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 420, + 307, + 436 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We adopt different ViTs as backbones, including ViT-tiny [18], Eva-tiny [21], and DeiT-tiny [73], to build three trackers for evaluation: ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT. The head of ORTrack consists of a stack of four Conv-BN-ReLU layers. The search region and template sizes are set to $256 \\times 256$ and $128 \\times 128$ , respectively. A combination of training sets from GOT-10k [30], LaSOT [20], COCO [48], and TrackingNet [56] is used for the training. The batch size is set to 32. We employ the AdamW optimizer [50], with a weight decay of $10^{-4}$ and an initial learning rate of $4 \\times 10^{-5}$ . The training is conducted over 300 epochs, with 60,000 image pairs processed in each epoch. The learning rate is reduced by a factor of 10 after 240 epochs.", + "bbox": [ + 88, + 444, + 482, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2. State-of-the-art Comparison", + "text_level": 1, + "bbox": [ + 89, + 665, + 344, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison with Lightweight Trackers. The overall performance of our ORTrack in comparison to 13 competing trackers on the four benchmarks is displayed in Table 1. As can be seen, our trackers demonstrate superior performance among all these trackers in terms of average (Avg.) precision (Prec.), success rate (Succ.) and speeds. On average, RACF [42] demonstrated the highest Prec. $(75.9\\%)$ and Succ. 
$(51.8\\%)$ among DCF-based trackers, while DRCI [93] achieves the highest precision and success rates, with $81.4\\%$ and $60.1\\%$ , respectively, among CNN-based trackers. However, the average Prec. and Succ. of all our trackers are greater than $82.0\\%$ and $62.0\\%$ , respectively, clearly surpassing DCF- and CNN-based approaches. Additionally, our ORTrack-DeiT achieves the highest Avg. Prec. and",
and GPU speed on VisDrone2018. Our ORTrack-DeiT surpasses all other methods in both metrics, demonstrating its superior accuracy and speed. Although trackers like AQATrack [87], HIPTrack [2], and ROMTrack [3] achieve precision comparable to our ORTrack-DeiT, their GPU speeds are much slower. Specifically, our method is 4, 6, and 4 times faster than AQATrack, HIPTrack, and ROMTrack, respectively.", + "bbox": [ + 511, + 683, + 908, + 834 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Attribute-Based Evaluation. To access our method's robustness against target occlusion, we compare ORTrack-", + "bbox": [ + 511, + 834, + 906, + 864 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "*Real-time performance applies to platforms similar to or more advanced than ours.", + "bbox": [ + 511, + 875, + 906, + 898 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/30f4f63d5479c9e7c39cc7978d66b2f065cec62132404c0b0f5b0ec73dd8fa3a.jpg", + "table_caption": [ + "Table 2. Precision (Prec.) and speed (FPS) comparison between ORTrack-DeiT and deep-based trackers on VisDrone2018 [98]." + ], + "table_footnote": [], + "table_body": "
TrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPS
ORTrack-DeiTOurs88.666.8206.2ZoomTrack [38]NIPS 2381.463.461.7SimTrack [8]ECCV 2280.060.969.7
AQATrack [87]CVPR 2487.266.953.4SeqTrack [10]CVPR 2385.365.815.3ToMP [55]CVPR 2284.164.421.4
HIPTrack [2]CVPR 2486.767.131.3MAT [96]CVPR 2381.662.268.4KeepTrack [54]ICCV 2184.063.520.3
EVPTrack [69]AAAI 2484.565.822.1SparseTT [23]IJCAI 2281.462.130.2SAOT [97]ICCV 2176.959.135.4
ROMTrack [3]ICCV 2386.466.751.1OSTrack [91]ECCV 2284.264.862.7PrDiMP50 [15]CVPR 2079.459.742.6
", + "bbox": [ + 114, + 114, + 883, + 186 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg", + "image_caption": [ + "Figure 3. Attribute-based comparison on the partial occlusion subset of VisDrone2018 [98]. ORTrack-DeiT* refers to ORTrack-DeiT without applying the occlusion-robust enhancement." + ], + "image_footnote": [], + "bbox": [ + 93, + 208, + 478, + 378 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "DeiT alongside 16 SOTA trackers on the partial occlusion subset of VisDrone2018. Additionally, we also assess the baseline ORTrack-DeiT*, i.e., ORTrack-DeiT without applying the proposed method for learning Occlusion-Robust Representation (ORR), for comparison. The precision plot are presented in Fig. 3, with additional attribute-based evaluation results provided in the supplemental materials. As observed, ORTrack-DeiT achieves the second-highest precision $(85.0\\%)$ , just slightly behind the first-ranked tracker AQATrack by $0.2\\%$ . Remarkably, incorporating the proposed components leads to a significant improvement over ORTrack-DeiT*, with increases of $6.9\\%$ in Prec., well underscoring the effectiveness of our method.", + "bbox": [ + 88, + 459, + 482, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3. Ablation Study", + "text_level": 1, + "bbox": [ + 89, + 667, + 243, + 684 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/778d6455e7c357455f6bc0b24a76e246ad665f3f59a3cf71913b262d79b1b256.jpg", + "table_caption": [ + "Table 3. Effect of ORR and AFKD on the baseline trackers." + ], + "table_footnote": [], + "table_body": "
TrackerORRAFKDUAVDTFPS
Prec.Succ.
ORTrack-ViT77.055.6216.2
80.3↑3.358.2↑2.6-
79.1↑2.157.5↑1.9290.3↑34%
ORTrack-Eva78.156.6238.3
80.8↑2.758.7↑2.1-
79.5↑1.457.8↑1.2308.8↑30%
ORTrack-DeiT78.656.7218.4
83.4↑4.860.1↑3.4-
82.5↑3.959.7↑3.0298.7↑36%
", + "bbox": [ + 116, + 731, + 459, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of Occlusion-Robust Representations (ORR) and Adaptive Feature-Based Knowledge Distillation", + "bbox": [ + 89, + 869, + 482, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(AFKD). To demonstrate the effectiveness of the proposed ORR and AFKD, Table 3 shows the evaluation results on UAVDT dataset as these components are gradually integrated into the baselines. To avoid potential variations due to randomness, we only present the speed of the baseline, since the GPU speeds of the baseline and its ORR-enhanced version are theoretically identical. As can be seen, the incorporation of ORR significantly enhances both Prec. and Succ. for all baseline trackers. Specifically, the Prec. increases for ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT are $3.3\\%$ , $2.7\\%$ , and $4.8\\%$ , respectively, while the Succ. increases are $2.6\\%$ , $2.1\\%$ , and $3.1\\%$ , respectively. These significant enhancements highlight the effectiveness of ORR in improving tracking precision. The further integration of AFKD results in consistent improvements in GPU speeds, with only slight reductions in Prec. and Succ. Specifically, all baseline trackers experience GPU speed enhancements of over $30.0\\%$ , with ORTrack-DeiT showing an impressive $36.0\\%$ improvement. These results affirm the effectiveness of AFKD in optimizing tracking efficiency while maintaining high tracking performance.", + "bbox": [ + 511, + 210, + 906, + 529 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a32930026e85095da5c264c34b784b8587c0f7197f937d9831316c7c501c4002.jpg", + "table_caption": [ + "Table 4. Impact of various Masking Operators on performance." + ], + "table_footnote": [], + "table_body": "
MethodmUmCSAM[37]AdAutoMix[65]CutMix[92]VisDrone2018
Prec.Succ.
ORTrack-DeiT81.662.2
86.765.4
88.666.8
86.865.6
84.363.8
85.764.2
", + "bbox": [ + 519, + 566, + 901, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effect of Masking Operators. To demonstrate the superiority of the proposed masking operator in terms of performance, we evaluate ORTrack-DeiT with various implementations of masking operators (i.e., $\\mathfrak{m}_{\\mathrm{U}}$ , $\\mathfrak{m}_{\\mathrm{C}}$ , and SAM [37]) alongside data mixing augmentation methods (i.e., AdAutoMix [65] and CutMix [92]). The evaluation results on VisDrone2018 are presented in Table 4. As shown, although using SAM, AdAutoMix, and CutMix improves performance, the best result achieved with SAM is only comparable to the performance of our $\\mathfrak{m}_{\\mathrm{U}}$ masking operator. When $\\mathfrak{m}_{\\mathrm{C}}$ is applied, the improvements are even more substantial, with increases of $7.0\\%$ and $4.6\\%$ , respectively. These results validate the effectiveness of the proposed ORR component and particularly demonstrate the superiority of the masking operator based on spatial Cox processes.", + "bbox": [ + 511, + 674, + 906, + 900 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/0797ddf346306b391113d1ad5a815add8a24529d2d51fe9d06dc617dc39d1caf.jpg", + "table_caption": [ + "Table 5. Impact of the adaptive knowledge distillation loss on the generalizability on LaSOT and TrackingNet." + ], + "table_footnote": [], + "table_body": "
MethodKDAFKDLaSOTTrackingNet
AUCPnormPAUCPnormP
ORTrack-DeiT53.760.852.672.877.867.1
54.061.253.273.178.467.4
54.662.654.373.779.168.2
", + "bbox": [ + 116, + 128, + 460, + 191 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of the Adaptive Knowledge Distillation Loss. To assess the impact of the adaptive knowledge distillation loss on generalizability, we train ORTrack-DeiT using GOT-10K with $\\varpi (\\mathcal{L}_{iou};\\alpha ,\\beta)$ and $\\varpi (\\mathcal{L}_{iou};\\alpha ,0)$ separately, then evaluate them on LaSOT and TrackingNet. The results are shown in Table 5. Note that $\\varpi (\\mathcal{L}_{iou};\\alpha ,0)$ degenerates to a non-adaptive knowledge distillation loss as it becomes a constant. As can be seen, AFKD demonstrates greater performance improvements than KD. For instance, using AFKD results in additional gains of over $1.1\\%$ in $P_{norm}$ and $P$ on LaSOT, demonstrating its superior generalizability.", + "bbox": [ + 89, + 213, + 483, + 396 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/3b4f89479c5ff8243e6be20cd787d0178a5864faf1227a509ce683510651c3ac.jpg", + "table_caption": [ + "Table 6. Application of our ORR component to three SOTA trackers: ARTrack [81], GRM [24], and DropTrack[82]." + ], + "table_footnote": [], + "table_body": "
TrackerORRUAVDTVisDrone2018
Prec.Succ.Prec.Succ.
ARTrack[81]77.154.677.759.5
78.5↑1.455.8↑1.279.5↑1.860.8↑1.3
GRM[24]79.057.782.763.4
81.7↑1.759.3↑1.684.8↑2.164.6↑1.2
DropTrack[82]76.955.981.562.7
78.7↑1.857.4↑1.582.8↑1.364.2↑1.5
", + "bbox": [ + 112, + 446, + 464, + 540 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Application to SOTA trackers. To show the wide applicability of our proposed method, we incorporate the proposed ORR into three existing SOTA trackers: ARTrack [81], GRM [24], and DropTrack [82]. Please note that we replace the model's original backbones with ViT-tiny [18] to reduce training time. As shown in Table 6, incorporating ORR results in significant improvements in both precision and success rates for the three baseline trackers. Specifically, ARTrack, GRM, and DropTrack demonstrate an improvement of more than $1.2\\%$ in both precision and success rate across two datasets. These experimental results demonstrate that the proposed ORR component can be seamlessly integrated into existing tracking frameworks, significantly improving tracking accuracy.", + "bbox": [ + 88, + 553, + 482, + 763 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative Results. Several qualitative tracking results of ORTrack-DeiT and seven SOTA UAV trackers are shown in Fig. 4. As can be seen, only our tracker successfully tracks the targets in all challenging examples, where pose variations, background clusters, and scale variations are presented. In these cases, our method performs significantly better and is more visually appealing, bolstering the effectiveness of the proposed method for UAV tracking.", + "bbox": [ + 88, + 763, + 482, + 885 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 5 shows attention and feature maps produced by", + "bbox": [ + 109, + 885, + 482, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg", + "image_caption": [ + "Figure 4. Qualitative evaluation on 3 video sequences from, respectively, UAV123 [57], UAVDT [19], and VisDrone2018 [98] (i.e., person9, S1607, and uav0000180_00050_s)." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 89, + 903, + 280 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg", + "image_caption": [ + "Figure 5. Visualize the attention map (left) and feature map (right) of the target images. The first row displays the search and masked images with masking ratios of $0\\%$ , $10\\%$ , $30\\%$ , and $70\\%$ . The second and third rows show the attention and feature maps generated by ORTrack-DeiT, with and without ORR, respectively." + ], + "image_footnote": [], + "bbox": [ + 514, + 342, + 903, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ORTrack-DeiT, with and without occlusion-robust enhancement. We observe that ORTrack-DeiT with ORR maintains a clearer focus on the targets and exhibits more consistent feature maps across masking ratios. These results support the effectiveness of our ORR component.", + "bbox": [ + 511, + 541, + 908, + 619 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 630, + 633, + 646 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In view of the common challenges posed by target occlusion in UAV tracking, in this work, we proposed to learn Occlusion-Robust Representation (ORR) by imposing an invariance of feature representation of the target with respect to random masking modeled by a spatial Cox process. Moreover, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) to enhance efficiency. Our approach is notably straightforward and can be easily integrated into other tracking frameworks. Extensive experiments across multiple UAV tracking benchmarks validate the effectiveness of our method, demonstrating that our ORTrack-DeiT achieves SOTA performance.", + "bbox": [ + 509, + 656, + 906, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments. 
This work was funded by the Guangxi Natural Science Foundation (Grant No. 2024GXNSFAA010484), and the National Natural Science Foundation of China (Nos. 62466013, 62206123).", + "bbox": [ + 511, + 837, + 908, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Wesam A. Askar, Osama Elmowafy, Anca L. Ralescu, Aliaa Abdel-Halim Youssif, and Gamal A. Elnashar. Occlusion detection and processing using optical flow and particle filter. Int. J. Adv. Intell. Paradigms, 15:63-76, 2020. 3", + "[2] Wenrui Cai, Qingjie Liu, and Yunhong Wang. Hiptrack: Visual tracking with historical prompts. In CVPR, pages 19258-19267, 2024. 6, 7", + "[3] Yidong Cai, Jie Liu, Jie Tang, and Gangshan Wu. Robust object modeling for visual tracking. In ICCV, pages 9589-9600, 2023. 6, 7", + "[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. Hift: Hierarchical feature transformer for aerial tracking. In ICCV, pages 15457-15466, 2021. 1, 2, 6", + "[5] Ziang Cao, Ziyuan Huang, Liang Pan, Shiwei Zhang, Ziwei Liu, and Changhong Fu. Ttrack: Temporal contexts for aerial tracking. In CVPR, pages 14798-14808, 2022. 1, 2, 6", + "[6] Satyaki Chakraborty and Martial Hebert. Learning to track object position through occlusion. ArXiv, abs/2106.10766, 2021. 3", + "[7] T-H Chang and Shaogang Gong. Tracking multiple people with a multi-camera system. In Womot, pages 19-26, 2001. 2", + "[8] Boyu Chen, Peixia Li, Lei Bai, Lei Qiao, and et al. Backbone is all your need: a simplified architecture for visual object tracking. In ECCV, pages 375-392, 2022. 1, 7", + "[9] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and et al. Learning efficient object detection models with knowledge distillation. NIPS, 30, 2017. 3", + "[10] Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, and Han Hu. 
Seqtrack: Sequence to sequence learning for visual object tracking. In CVPR, pages 14572-14581, 2023. 7", + "[11] Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016. 4", + "[12] Cheng Chi, Shifeng Zhang, Junliang Xing, Zhen Lei, S. Li, and Xudong Zou. Pedhunter: Occlusion robust pedestrian detector in crowded scenes. ArXiv, abs/1909.06826, 2019. 3", + "[13] Yutao Cui, Cheng Jiang, and et al. Mixformer: End-to-end tracking with iterative mixed attention. In CVPR, pages 13608-13618, 2022. 1, 2, 5", + "[14] Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, and Michael Felsberg. Eco: Efficient convolution operators for tracking. In CVPR, pages 6638-6646, 2017. 6" + ], + "bbox": [ + 93, + 114, + 483, + 898 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Martin Danelljan, Luc Van Gool, and Radu Timofte. Probabilistic regression for visual tracking. In CVPR, pages 7181-7190, 2020. 7", + "[16] Martin Danelljan, Gustav Hager, Fahad Shahbaz Khan, and et al. Discriminative scale space tracking. IEEE TPAMI, 39(8):1561-1575, 2017. 6", + "[17] Soumen Das, Saroj K. Biswas, and Biswajit Purkayastha. Occlusion robust sign language recognition system for indian sign language using cnn and pose features. Multimed. Tools. Appl, 2024. 3", + "[18] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, and et al. An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv, abs/2010.11929, 2020. 6, 8", + "[19] Dawei Du, Yuankai Qi, Hongyang Yu, and et al. The unmanned aerial vehicle benchmark: Object detection and tracking. In ECCV, pages 375-391, 2018. 5, 6, 8", + "[20] Heng Fan, Liting Lin, Fan Yang, and et al. Lasot: A high-quality benchmark for large-scale single object tracking. In CVPR, pages 5369-5378, 2018. 6", + "[21] Yuxin Fang, Quan Sun, Xinggang Wang, and et al. Eva-02: A visual representation for neon genesis. 
Image and Vision Computing, 149:105171, 2024. 6", + "[22] Changhong Fu, Xiang Lei, and et al. Progressive representation learning for real-time UAV tracking. In IROS, pages 5072-5079, 2024. 6", + "[23] Zhihong Fu, Zehua Fu, Qingjie Liu, Wenrui Cai, and Yunhong Wang. Sparsett: Visual tracking with sparse transformers. arXiv e-prints, 2022. 7", + "[24] Shenyuan Gao, Chunluan Zhou, and Jun Zhang. Generalized relation modeling for transformer tracking. In CVPR, pages 18686-18695, 2023. 8", + "[25] Goutam Yelluru Gopal and Maria A Amer. Separable self and mixed attention transformers for efficient object tracking. In WACV, pages 6708-6717, 2024. 6", + "[26] Jianping Gou, Baosheng Yu, Stephen J Maybank, and Dacheng Tao. Knowledge distillation: A survey. IJCV, 129(6):1789-1819, 2021. 3, 5", + "[27] Karthik Hariharakrishnan and Dan Schonfeld. Fast object tracking using adaptive block matching. IEEE TMM, 7:853-859, 2005. 3", + "[28] Kaiming He, Xinlei Chen, Saining Xie, and et al. Masked autoencoders are scalable vision learners. In CVPR, pages 15979-15988, 2021. 3", + "[29] João F. Henriques, Rui Caseiro, Pedro Martins, and et al. High-speed tracking with kernelized correlation filters. IEEE TPAMI, 37:583-596, 2015. 6", + "[30] L. Huang, X. Zhao, and K. Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE TPAMI, (5), 2021. 6", + "[31] Ziyuan Huang, Changhong Fu, and et al. Learning aberrance repressed correlation filters for real-time uav tracking. In ICCV, pages 2891-2900, 2019. 2" + ], + "bbox": [ + 516, + 90, + 903, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Janine Illian, Antti Penttinen, Helga Stoyan, and Dieterich Stoyan. Statistical analysis and modelling of spatial point patterns. John Wiley & Sons, 2008. 3, 4", + "[33] Michal Irani and Shmuel Peleg. Motion analysis for image enhancement: Resolution, occlusion, and transparency. JVCIR, 4(4):324-335, 1993. 
2", + "[34] Dippal Israni and Hiren K. Mewada. Feature descriptor based identity retention and tracking of players under intense occlusion in soccer videos. Int. J. Intell. Eng. Syst, 2018. 3", + "[35] Minyang Jiang and et al. Occlusion-robust fau recognition by mining latent space of masked autoencoders. Neurocomputing, 569:127107, 2024. 3", + "[36] Jung Uk Kim, Ju Won Kwon, and et al. BBC net: Bounding-box critic network for occlusion-robust object detection. IEEE TCSVT, 30:1037-1050, 2020. 3", + "[37] Alexander Kirillov, Eric Mintun, and et al. Segment anything. In ICCV, pages 4015-4026, 2023. 7", + "[38] Yutong Kou, Jin Gao, Bing Li, and et al. Zoomtrack: Target-aware non-uniform resizing for efficient visual tracking. NIPS, 36:50959-50977, 2023. 7", + "[39] Thijs P. Kuipers, Devanshu Arya, and Deepak K. Gupta. Hard occlusions in visual object tracking. In ECCV Workshops, 2020. 3", + "[40] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. IJCV, 128:642-656, 2018. 5", + "[41] Luming Li, Chenglizhao Chen, and Xiaowei Zhang. Mask-guided self-distillation for visual tracking. In ICME, pages 1-6, 2022. 3", + "[42] Shuiwang Li, Yuting Liu, Qijun Zhao, and Ziliang Feng. Learning residue-aware correlation filters and refining scale for real-time uav tracking. Pattern Recognition, 127:108614, 2022. 1, 2, 3, 6", + "[43] Shuiwang Li, Xiangyang Yang, and et al. Learning target-aware vision transformers for real-time uav tracking. IEEE TGRS, 2024. 1", + "[44] Shuiwang Li, Yangxiang Yang, Dan Zeng, and Xucheng Wang. Adaptive and background-aware vision transformer for real-time uav tracking. In ICCV, pages 13943-13954, 2023. 1, 2, 6", + "[45] Siyi Li and D. Y. Yeung. Visual object tracking for unmanned aerial vehicles: A benchmark and new motion models. In AAAI, 2017. 5, 6", + "[46] Yiming Li, Changhong Fu, Fangqiang Ding, and et al. Autotrack: Towards high-performance visual tracking for uav with automatic spatio-temporal regularization. 
In CVPR, pages 11920-11929, 2020. 1, 2, 6", + "[47] Yongxin Li, Mengyuan Liu, You Wu, and et al. Learning adaptive and view-invariant vision transformer for real-time uav tracking. In ICML, 2024. 6", + "[48] Tsung Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, 2014. 6" + ], + "bbox": [ + 91, + 90, + 485, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[49] Mengyuan Liu, Yuelong Wang, and et al. Global filter pruning with self-attention for real-time uav tracking. In BMVC, page 861, 2022. 1", + "[50] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6", + "[51] David G Lowe. Object recognition from local scale-invariant features. In ICCV, pages 1150-1157, 1999. 2", + "[52] Siyu Ma, Yuting Liu, and et al. Learning disentangled representation in pruning for real-time uav tracking. In ACML, pages 690-705, 2023. 1", + "[53] Torsten Mattfeldt. Stochastic geometry and its applications. Journal of Microscopy, 183:257-257, 1996. 4", + "[54] Christoph Mayer, Martin Danelljan, and et al. Learning target candidate association to keep track of what not to track. In ICCV, pages 13424-13434, 2021. 7", + "[55] Christoph Mayer, Martin Danelljan, and et al. Transforming model prediction for tracking. In CVPR, pages 8721-8730, 2022. 7", + "[56] Matthias Mueller, Adel Bibi, and et al. Trackingnet: A large-scale dataset and benchmark for object tracking in the wild. In ECCV, pages 300-317, 2018. 6", + "[57] Matthias Mueller, Neil G. Smith, and Bernard Ghanem. A benchmark and simulator for uav tracking. In ECCV, 2016. 5, 6, 8", + "[58] Hieu Tat Nguyen and Arnold W. M. Smeulders. Fast occluded object tracking by a robust appearance filter. IEEE TPAMI, 26:1099-1104, 2004. 3", + "[59] Hieu Tat Nguyen, Marcel Worring, and Rein van den Boomgaard. Occlusion robust adaptive template tracking. In ICCV, pages 678-683, 2001. 3", + "[60] Toby C. 
O’Neil. Geometric measure theory. 2002. 4", + "[61] Jiyan Pan and Bo Hu. Robust occlusion handling in object tracking. In CVPR, pages 1-8, 2007. 3", + "[62] Joo Hyun Park, Yeong Min Oh, and et al. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In CVPR, pages 1486–1495, 2022. 2, 3", + "[63] Wonpyo Park and et al. Relational knowledge distillation. In CVPR, pages 3962-3971, 2019. 3", + "[64] Zhimao Peng, Zechao Li, Junge Zhang, and et al. Few-shot image recognition with knowledge transfer. In ICCV, pages 441-449, 2019. 3", + "[65] Huafeng Qin, Xin Jin, Yun Jiang, Mounim A El-Yacoubi, and Xinbo Gao. Adversarial automixup. arXiv preprint arXiv:2312.11954, 2023. 7", + "[66] Delin Qu, Yizhen Lao, and et al. Towards nonlinear-motion-aware and occlusion-robust rolling shutter correction. ICCV, pages 10646–10654, 2023. 3", + "[67] Seyed Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, and et al. Generalized intersection over union:" + ], + "bbox": [ + 514, + 90, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "A metric and a loss for bounding box regression. \nCVPR, pages 658-666, 2019. 2, 5", + "[68] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, and et al. Imagenet large scale visual recognition challenge. IJCV, 115:211 - 252, 2014. 5", + "[69] Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, and Xianxian Li. Explicit visual prompts for visual object tracking. In AAAI, 2024. 7", + "[70] Abhinav Shrivastava, Abhinav Kumar Gupta, and Ross B. Girshick. Training region-based object detectors with online hard example mining. In CVPR, pages 761-769, 2016. 2", + "[71] Markus Storer and et al. Active appearance model fitting under occlusion using fast-robust pca. In VISAPP, pages 129–136, 2009. 2", + "[72] Chen Sun and et al. Siamohot: A lightweight dual siamese network for onboard hyperspectral object tracking via joint spatial-spectral knowledge distillation. 
IEEE TGRS, 61:1-12, 2023. 3", + "[73] Hugo Touvron and et al. Training data-efficient image transformers & distillation through attention. In ICML, pages 10347-10357, 2021. 6", + "[74] Wenxuan Tu, Sihang Zhou, and et al. Hierarchically contrastive hard sample mining for graph self-supervised pretraining. IEEE TNNLS, PP, 2023. 2", + "[75] Frederick Tung and Greg Mori. Similarity-preserving knowledge distillation. In ICCV, pages 1365-1374, 2019. 3", + "[76] K. Wang and et al. Region attention networks for pose and occlusion robust facial expression recognition. IEEE TIP, 29:4057-4069, 2019. 2, 3", + "[77] Keze Wang and et al. Towards human-machine cooperation: Self-supervised sample mining for object detection. In CVPR, pages 1605-1613, 2018. 2", + "[78] Lin Wang and Kuk-Jin Yoon. Knowledge distillation and student-teacher learning for visual intelligence: A review and new outlooks. IEEE TPAMI, 44:3048-3068, 2020. 3, 5", + "[79] Xucheng Wang, Xiangyang Yang, and et al. Learning disentangled representation with mutual information maximization for real-time uav tracking. In ICME, pages 1331-1336, 2023. 1", + "[80] Xucheng Wang, Dan Zeng, Qijun Zhao, and Shuiwang Li. Rank-based filter pruning for real-time uav tracking. In ICME, pages 01-06, 2022. 1, 2", + "[81] Xing Wei, Yifan Bai, and et al. Autoregressive visual tracking. In CVPR, pages 9697-9706, 2023. 8", + "[82] Qiangqiang Wu, Tianyu Yang, and et al. Dropmae: Masked autoencoders with spatial-attention dropout for tracking tasks. In CVPR, pages 14561-14571, 2023. 1, 8" + ], + "bbox": [ + 91, + 90, + 482, + 898 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] Wanying Wu, Pengzhi Zhong, and Shuiwang Li. Fisher pruning for real-time uav tracking. In IJCNN, pages 1-7, 2022. 1, 2", + "[84] You Wu, Xucheng Wang, Dan Zeng, and et al. Learning motion blur robust vision transformers with dynamic early exit for real-time uav tracking. 
arXiv preprint arXiv:2407.05383, 2024.1", + "[85] Fei Xie, Chunyu Wang, and et al. Learning tracking representations via dual-branch fully transformer networks. In ICCV, pages 2688–2697, 2021. 2", + "[86] Fei Xie, Chunyu Wang, Guangting Wang, and et al. Correlation-aware deep tracking. In CVPR, pages 8741-8750, 2022. 2", + "[87] Jinxia Xie and et al. Autoregressive queries for adaptive tracking with spatio-temporal transformers. In CVPR, pages 19300-19309, 2024. 6, 7", + "[88] Di Yang and et al. Self-supervised video pose representation learning for occlusion- robust action recognition. In AFGR, pages 1-5, 2021. 3", + "[89] Xiangyang Yang, Dan Zeng, and et al. Adaptively bypassing vision transformer blocks for efficient visual tracking. Pattern Recognition, 161:111278, 2025. 2", + "[90] Liangliang Yao, Changhong Fu, and et al. Sgdvit: Saliency-guided dynamic vision transformer for uav tracking. arXiv preprint arXiv:2303.04378, 2023. 6", + "[91] Botao Ye, Hong Chang, and et al. Joint feature learning and relation modeling for tracking: A one-stream framework. In ECCV, pages 341-357, 2022. 1, 2, 5, 7", + "[92] Sangdoo Yun, Dongyoon Han, and et al. Cutmix: Regularization strategy to train strong classifiers with localizable features. In ICCV, pages 6023-6032, 2019. 7", + "[93] Dan Zeng, Mingliang Zou, Xucheng Wang, and Shuiwang Li. Towards discriminative representations with contrastive instances for real-time uav tracking. In ICME, pages 1349-1354, 2023. 6", + "[94] Chenyuan Zhang, Jiu Xu, and et al. A klt-based approach for occlusion handling in human tracking. In PCS, pages 337-340, 2012. 3", + "[95] Yi Zhang, Pengliang Ji, and et al. 3d-aware neural body fitting for occlusion robust 3d human pose estimation. ICCV, pages 9365-9376, 2023. 3", + "[96] Haojie Zhao, Dong Wang, and Huchuan Lu. Representation learning for visual object tracking by masked appearance transfer. In CVPR, pages 18696-18705, 2023. 7", + "[97]Zikun Zhou, Wenjie Pei, Xin Li, and et al. 
Saliencyassociated object tracking. In ICCV, pages 9846- 9855,2021.7", + "[98] Pengfei Zhu, Longyin Wen, and et al. Visdrone-vdt2018: The vision meets drone video detection and tracking challenge results. In ECCV Workshops, 2018. 5, 6, 7, 8" + ], + "bbox": [ + 516, + 90, + 903, + 898 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_model.json b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7f111c634999517db8f3d37c0626060baf30b63c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_model.json @@ -0,0 +1,2268 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.09228v1 [cs.CV] 12 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.106, + 0.13, + 0.892, + 0.153 + ], + "angle": 0, + "content": "Learning Occlusion-Robust Vision Transformers for Real-Time UAV Tracking" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.18, + 0.755, + 0.217 + ], + "angle": 0, + "content": "You Wu\\(^{1\\dagger}\\), Xucheng Wang\\(^{2\\dagger}\\), Xiangyang Yang\\(^{1}\\), Mengyuan Liu\\(^{1}\\), Dan Zeng\\(^{3}\\), Hengzhou Ye\\(^{1}\\), Shuiwang Li\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.217, + 0.846, + 0.234 + ], + "angle": 0, + "content": "1College of Computer Science and Engineering, Guilin University of Technology, China" + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.234, + 0.76, + 0.252 + ], + "angle": 0, + "content": "\\(^{2}\\)School of Computer Science, Fudan University, Shanghai, China" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.252, + 0.785, + 0.269 + ], + "angle": 0, + "content": "\\(^{3}\\)School of Artificial Intelligence, Sun Yat-sen University, Zhuhai, China" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.273, + 0.885, + 
0.305 + ], + "angle": 0, + "content": "wuyou@glut.edu.cn, xcwang317@glut.edu.cn, xyyang317@163.com, mengyuaner1122@foxmail.com, zengd8@mail.sysu.edu.cn, yehengzhou@glut.edu.cn, lishuiwang0721@163.com" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.34, + 0.327, + 0.354 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.371, + 0.485, + 0.75 + ], + "angle": 0, + "content": "Single-stream architectures using Vision Transformer (ViT) backbones show great potential for real-time UAV tracking recently. However, frequent occlusions from obstacles like buildings and trees expose a major drawback: these models often lack strategies to handle occlusions effectively. New methods are needed to enhance the occlusion resilience of single-stream ViT models in aerial tracking. In this work, we propose to learn Occlusion-Robust Representations (ORR) based on ViTs for UAV tracking by enforcing an invariance of the feature representation of a target with respect to random masking operations modeled by a spatial Cox process. Hopefully, this random masking approximately simulates target occlusions, thereby enabling us to learn ViTs that are robust to target occlusion for UAV tracking. This framework is termed ORTrack. Additionally, to facilitate real-time applications, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to create a more compact tracker, which adaptively mimics the behavior of the teacher model ORTrack according to the task's difficulty. This student model, dubbed ORTrack-D, retains much of ORTrack's performance while offering higher efficiency. Extensive experiments on multiple benchmarks validate the effectiveness of our method, demonstrating its state-of-the-art performance. Codes is available at https://github.com/wuyou3474/ORTrack." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.777, + 0.222, + 0.792 + ], + "angle": 0, + "content": "1. 
Introduction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.802, + 0.484, + 0.879 + ], + "angle": 0, + "content": "Unmanned aerial vehicles (UAVs) are leveraged in a plethora of applications, with increasing emphasis on UAV tracking [4, 43, 46, 49, 52, 79, 84]. This form of tracking poses an exclusive set of challenges such as tricky viewing angles, motion blur, severe occlusions, and the need for" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.339, + 0.907, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.533, + 0.907, + 0.588 + ], + "angle": 0, + "content": "Figure 1. Compared to SOTA UAV trackers on UAVDT, our ORTrack-DeiT sets a new record with \\(83.4\\%\\) precision and a speed of 236 FPS. Our ORTrack-D-DeiT strikes a better trade-off with \\(82.5\\%\\) precision and a speed of about 313 FPS." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.624, + 0.907, + 0.714 + ], + "angle": 0, + "content": "efficiency due to UAVs' restricted battery life and computational resources [5, 42, 80, 83]. Consequently, designing an effective UAV tracker requires a delicate balance between precision and efficiency. It needs to ensure accuracy while being conscious of the UAV's energy and computational constraints." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.909, + 0.903 + ], + "angle": 0, + "content": "In recent years, there has been a notable shift from discriminative correlation filters (DCF)-based methods, because of their unsatisfactory robustness, towards DL-based approaches, particularly with the adoption of single-stream architectures that integrate feature extraction and fusion via pre-trained Vision Transformer (ViT) backbone networks. This single-stream paradigm has proven highly effective in generic visual tracking, as evidenced by the success of recent methods such as OSTrack [91], SimTrack [8], Mixformer [13], and DropMAE [82]. 
Building on these advancements, Aba-VTrack [44] introduces a lightweight DL-based tracker within this framework, employing an adap" + }, + { + "type": "page_footnote", + "bbox": [ + 0.115, + 0.887, + 0.38, + 0.901 + ], + "angle": 0, + "content": "† Equal contribution. * Corresponding authors." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.094, + 0.092, + 0.481, + 0.286 + ], + "angle": 0, + "content": "tive and background-aware token computation method to enhance inference speed, which demonstrates remarkable precision and speed for real-time UAV tracking. However, the use of a variable number of tokens in Aba-VTrack incurs significant time costs, primarily due to the unstructured access operations required during inference. Adding to this, it also grappled with establishing robustness when facing target occlusion, a challenge common in UAV tracking often triggered by obstructive elements like buildings, mountains, trees, and so forth. The problem is exacerbated by the fact that UAVs may not always be capable of circumventing these impediments due to potential large-scale movements involved." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.292, + 0.481, + 0.806 + ], + "angle": 0, + "content": "To address these issues, we introduce a novel framework designed to enhance the occlusion robustness of ViTs for UAV tracking. Our approach, termed ORTrack, aims to learn ViT-based trackers that maintain robust feature representations even in the presence of target occlusion. This is achieved by enforcing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process. The random masking serves as a simulation of target occlusion, which is expected to mimic real occlusion challenges in UAV tracking and aid in learning Occlusion-Robust Representations (ORR). 
Notably, our method for learning occlusion-robust representation simply uses a Mean Squared Error (MSE) loss during training, adding no extra computational load during inference. Additionally, to enhance efficiency for real-time applications, we introduce an Adaptive Feature-Based Knowledge Distillation (AFKD) method. This method creates a more compact tracker, named ORTrack-D, which adaptively mimics the behavior of the teacher model ORTrack based on the complexity of the tracking task during training. The reasoning is that the teacher model, in its pursuit of powerful representations, may compromise its generalizability. Hence, in situations where generalizability is vital, the student model may perform better, and closely mimicking the teacher's behavior becomes less important. We use the deviation of GIoU loss [67] from its average value to quantify the difficulty of the tracking task, which makes sense as loss value is a commonly used criteria to define hard samples [70, 74, 77]. ORTrack-D maintains much of ORTrack's performance with higher efficiency, making it better suited for deployment in resource-constrained environments typical of UAV applications. Extensive experiments on four benchmarks show that our method achieves state-of-the-art performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.094, + 0.811, + 0.481, + 0.9 + ], + "angle": 0, + "content": "In summary, our contributions are as follows: (i) We propose to learn Occlusion-Robust Representations (ORR) by imposing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process, which can be easily integrated into other tracking frameworks without requiring additional ar" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.092, + 0.904, + 0.272 + ], + "angle": 0, + "content": "chitectures or increasing inference time; (ii) We propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to further enhance efficiency, in which the student model adaptively mimics the behavior of the teacher model according to the task's difficulty, resulting in a significant increase in tracking speed while only minimally reducing accuracy; (iii) We introduce ORTrack, a family of efficient trackers based on these components, which integrates seamlessly with other ViT-based trackers. ORTrack demonstrates superior performance while maintaining extremely fast tracking speeds. Extensive evaluations show that ORTrack achieves state-of-the-art real-time performance." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.289, + 0.649, + 0.303 + ], + "angle": 0, + "content": "2. Related work" + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.314, + 0.674, + 0.329 + ], + "angle": 0, + "content": "2.1. Visual Tracking." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.338, + 0.904, + 0.698 + ], + "angle": 0, + "content": "In visual tracking, the primary approaches consist of DCF-based and DL-based trackers. DCF-based trackers are favored for UAV tracking due to their remarkable efficiency, but they face difficulties in maintaining robustness under complex conditions [31, 42, 46]. 
Recently developed lightweight DL-based trackers have improved tracking precision and robustness for UAV tracking [4, 5]; however, their efficiency lags behind that of most DCF-based trackers. Model compression techniques like those in [80, 83] have been used to further boost efficiency, yet these trackers still face issues with tracking precision. Vision Transformers (ViTs) are gaining traction for streamlining and unifying frameworks in visual tracking, as seen in studies like [13, 85, 86, 89, 91]. While these frameworks are compact and efficient, few are based on lightweight ViTs, making them impractical for real-time UAV tracking. To address this, Aba-ViTrack [44] used lightweight ViTs and an adaptive, background-aware token computation method to enhance efficiency for real-time UAV tracking. However, the variable token number in this approach necessitates unstructured access operations, leading to significant time costs. In this work, we aim to improve the efficiency of ViTs for UAV tracking through knowledge distillation, a more structured method." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.712, + 0.872, + 0.727 + ], + "angle": 0, + "content": "2.2. Occlusion-Robust Feature Representation." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.736, + 0.904, + 0.9 + ], + "angle": 0, + "content": "Occlusion-robust feature representation is crucial in computer vision and image processing. It involves developing methods that can recognize and process objects in images even when parts are hidden or occluded [62, 76]. Early efforts often relied on handcrafted features, active appearance models, motion analysis, sensor fusion, etc [7, 33, 51, 71]. While effective in some cases, these methods struggled with the complexity and variability of real-world visual data. The advent of deep learning revolutionized the field. 
Many studies have applied Convolutional Neural Networks (CNNs) and other deep architectures to" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.364 + ], + "angle": 0, + "content": "extract occlusion-robust representations [35, 62, 66, 76]. These approaches use deep models to capture complex patterns and variations in visual data, making learned features resilient to occlusions and having proven valuable for many computer vision applications, such as action recognition [17, 88], pose estimation [62, 95], and object detection [12, 36]. The exploration of occlusion-robust representations in visual tracking has also demonstrated great success [1, 6, 27, 34, 39, 58, 59, 61, 94]. However, to our knowledge, there is a dearth of research to explore learning occlusion-robust ViTs particularly in a unified framework for UAV tracking. In this study, we delve into the exploration of learning occlusion-robust feature representations based on ViTs by simulating occlusion challenges using random masking modeled by a spatial Cox process, specifically tailored for UAV tracking. This study represents the first use of ViTs for acquiring occlusion-robust feature representations in UAV tracking." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.372, + 0.308, + 0.387 + ], + "angle": 0, + "content": "2.3. Knowledge Distillation." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.395, + 0.483, + 0.773 + ], + "angle": 0, + "content": "Knowledge distillation is a technique used to compress models by transferring knowledge from a complex \"teacher\" model to a simpler \"student\" model, with the aim of maintaining performance while reducing computational resources and memory usage [63, 75]. It involves various types of knowledge, distillation strategies, and teacher-student architectures, typically falling into three categories: response-based, feature-based, and relation-based distillation [26, 63, 78]. 
Widely applied in tasks such as image classification [64], object detection [9], and neural machine translation [42], it offers potential to improve the efficiency and even effectiveness of deep learning models. Recently, it has been successfully utilized to enhance the efficiency of DL-based trackers. For instance, Li et al. [41] used mask-guided self-distillation to compress Siamese-based visual trackers. Sun et al. [72] introduced a lightweight dual Siamese tracker for hyperspectral object tracking, using a spatial-spectral knowledge distillation method to learn from a deep tracker. However, these techniques are mainly Siamese-based and tailored to specific tracking frameworks, posing challenges for adaptation to our ViT-based approach. In this study, we propose a simple yet effective feature-based knowledge distillation method, in which the student adaptively replicate the behavior of the teacher based on the complexity of the tracking task during training." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.785, + 0.182, + 0.8 + ], + "angle": 0, + "content": "3. Method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.482, + 0.902 + ], + "angle": 0, + "content": "In this section, we first provide a brief overview of our end-to-end tracking framework, named ORTrack, as shown in Figure 2. Then, we introduce the occlusion-robust representation learning based on spatial Cox processes and the method of adaptive knowledge distillation. Finally, we detail the prediction head and training loss." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.625, + 0.106 + ], + "angle": 0, + "content": "3.1. Overview" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.114, + 0.907, + 0.522 + ], + "angle": 0, + "content": "The proposed ORTrack introduces an novel single-stream tracking framework, featuring a spatial Cox process-based masking for occlusion-robust representation learning and an adaptive feature-based knowledge distillation pipeline. 
ORTrack consists of two sequential training phases: the teacher model training pipeline for learning occlusion-robust representations, followed by the student training pipeline involving adaptive knowledge distillation. In the teacher model training phase, the input includes a target template \\( Z \\in \\mathbb{R}^{3 \\times H_z \\times W_z} \\) of spatial size \\( H_z \\times W_z \\), a randomly masked target template \\( Z' = \\mathfrak{m}(Z) \\), and a search image \\( X \\in \\mathbb{R}^{3 \\times H_x \\times W_x} \\) of spatial size \\( H_x \\times W_x \\), where \\( \\mathfrak{m}(\\cdot) \\) represents the random masking operation that masks out non-overlap patches of size \\( b \\times b \\) with a certain masking ratio \\( \\sigma \\). To achieve occlusion-robust representation with ViTs, we minimize the mean squared error (MSE) between two versions of the template representation: one with random masking and one without. During the training of the student model, the teacher's weights remain fixed while both the teacher and student models receive inputs \\( Z \\) and \\( X \\). Let \\( \\mathfrak{B}_T \\) and \\( \\mathfrak{B}_S \\) represent the backbones of the teacher and student, respectively. In our implementation, \\( \\mathfrak{B}_T \\) and \\( \\mathfrak{B}_S \\) share the same structure of the ViT layer but differ in the number of layers. Feature-based knowledge distillation is used to transfer the knowledge embedded in the teacher model's backbone features to the student model through an adaptive distillation loss." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.531, + 0.905, + 0.562 + ], + "angle": 0, + "content": "3.2. 
Occlusion-Robust Representations (ORR) Based on Spatial Cox Processes" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.569, + 0.907, + 0.809 + ], + "angle": 0, + "content": "To begin, we describe two random masking operations used to simulate occlusion challenges: one from MAE [28] and our proposed method based on a Spatial Cox process, denoted by \\(\\mathfrak{m}_{\\mathrm{U}}\\) and \\(\\mathfrak{m}_{\\mathrm{C}}\\), respectively. Although \\(\\mathfrak{m}_{\\mathrm{U}}\\) allows the model to learn robust representations that are less sensitive to noise or missing information by randomly ignoring certain parts of the input data during training [28], it is less effective when used to simulate occlusion since each spatial position (in the sense of block size) is masked out with equal probability, especially in our situation where the target template generally contains background. To ensure that the target is masked out as expected with higher probabilities at a given masking ratio, thereby making the occlusion simulation more effective, we employ a finite Cox process [32] to model this masking operation, which is detailed as follows." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.811, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Define two associated random matrices \\(\\mathbf{m} = (m_{i,j})\\), \\(\\mathbf{b} = (b_{i,j})\\), \\(1 \\leqslant i \\leqslant H_z / b\\), \\(1 \\leqslant j \\leqslant W_z / b\\), where \\(m_{i,j} \\sim \\mathcal{U}(0,1)\\) (i.e., \\(m_{i,j}\\) follows a uniform distribution over the interval [0, 1]), \\(b_{i,j} \\in \\{0,1\\}\\) equals 1 if \\(m_{i,j} \\in \\mathrm{TopK}(\\mathbf{m}, K)\\), and 0 otherwise. 
\\(\\mathrm{TopK}(\\mathbf{m}, K)\\) returns the \\(K = \\lfloor (1 - \\sigma)H_zW_z \\rfloor\\) largest elements from \\(\\mathbf{m}\\), where \\(\\lfloor x \\rfloor\\) rounds \\(x\\) to" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.087, + 0.833, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.345, + 0.908, + 0.389 + ], + "angle": 0, + "content": "Figure 2. Overview of the proposed ORTrack framework, which includes separate training pipelines for a teacher and a student model. Note that the spatial Cox process-based masking and occlusion-robust representation learning are applied only in the teacher pipeline. Once the teacher is trained, its weights are fixed for training the student model with the proposed adaptive knowledge distillation." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.413, + 0.486, + 0.89 + ], + "angle": 0, + "content": "the nearest integer. Mathematically, \\(\\mathfrak{m}_{\\mathrm{U}}(Z) = Z\\odot (\\mathbf{b}\\otimes \\mathbf{1})\\) where \\(\\odot\\) denotes the Hadamard product and \\(\\otimes\\) denotes the tensor product, \\(\\mathbf{1}\\) is an all-ones matrix of size \\(b\\times b\\). Before defining \\(\\mathfrak{m}_{\\mathrm{C}}\\), we establish core notations relevant to spatial Cox processes. It extends the concept of spatial inhomogeneous Poisson point processes by incorporating a random intensity function, which, in turn, is defined as a Poisson point process with an intensity determined by a location-dependent function in the underlying space. 
For Euclidean space \\(\\mathbb{R}^2\\), an inhomogeneous Poisson point process is defined by a locally integrable positive intensity function \\(\\lambda \\colon \\mathbb{R}^2\\to [0,\\infty)\\), such that for every bounded region \\(\\mathcal{B}\\) the integral \\(\\Lambda (\\mathcal{B}) = \\int_{\\mathcal{B}}\\lambda (x,y)\\mathrm{d}xdy\\) is finite, where \\(\\Lambda (\\mathcal{B})\\) has the interpretation of being the expected number of points of the Poisson process located in \\(\\mathcal{B}\\), and for every collection of disjoint bounded Borel measurable sets \\(\\mathcal{B}_1,\\dots,\\mathcal{B}_k\\) [60], its number distributions is defined by \\(\\operatorname*{Pr}\\{\\mathrm{N}(\\mathcal{B}_i) = n_i,i = 1,\\ldots ,k\\} = \\prod_{i = 1}^{k}\\frac{(\\Lambda(\\mathcal{B}_i))^{n_i}}{n_i!} e^{-\\Lambda (\\mathcal{B}_i)}\\), \\(n_i\\in \\mathbb{Z}^{0 + }\\), where \\(\\operatorname*{Pr}\\) denotes the probability measure, \\(\\mathrm{N}\\) indicates the random counting measure such that \\(\\Lambda (\\mathcal{B}) = \\mathbb{E}[\\mathrm{N}(\\mathcal{B})]\\), \\(\\mathbb{E}\\) is the expectation operator. In particular, the conditional distribution of the points in a bounded set \\(\\mathcal{B}\\) given that \\(\\mathrm{N}(\\mathcal{B}) = n\\in \\mathbb{Z}^{0 + }\\) is not uniform, and \\(f_{n}(p_{1},\\dots,p_{n}) = \\prod_{n}^{i = 1}\\frac{\\lambda(p_{i})}{\\Lambda(\\mathcal{B})}\\), \\(p_1,\\dots,p_n\\in \\mathcal{B}\\) defines the corresponding location density function of the \\(n\\) points. Since a Cox process can be regarded as the result of a two-stage random mechanism for which it is sometimes termed 'doubly stochastic Poisson process' [32], the finite Cox processes can be simulated in a straightforward way based on the hierarchical nature of the model. Specifically, in the first step, the intensity \\(\\lambda (x,y)\\) is generated. 
In the second step, an in" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.414, + 0.907, + 0.519 + ], + "angle": 0, + "content": "homogeneous Poisson point process is simulated using the generated \\(\\lambda(x, y)\\) [32, 53]. The thinning algorithm [11] is used here for simulating inhomogeneous Poisson point processes. It involves simulating a homogeneous Poisson point process with a higher rate than the maximum possible rate of the inhomogeneous process, and then \"thinning\" out the generated points to match the desired intensity function." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.52, + 0.909, + 0.598 + ], + "angle": 0, + "content": "In this work, the randomness of the intensity function is modeled by a random variable \\(\\Gamma\\) that has a Poisson distribution with expectation of \\(\\varsigma\\), namely, \\(\\operatorname{Pr}\\{\\Gamma = k\\} = \\frac{\\varsigma^k e^{-\\varsigma}}{k!}\\) where \\(k \\in \\mathbb{Z}^{0+}\\). The intensity function of the inhomogeneous Poisson point process is then given by" + }, + { + "type": "equation", + "bbox": [ + 0.615, + 0.603, + 0.907, + 0.64 + ], + "angle": 0, + "content": "\\[\n\\lambda (x, y) = \\frac {\\Gamma e ^ {- (x ^ {2} + y ^ {2})}}{\\int_ {\\mathcal {B}} e ^ {- (x ^ {2} + y ^ {2})} d x d y}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.644, + 0.909, + 0.869 + ], + "angle": 0, + "content": "Note that \\(\\lambda(x, y)\\) is a bell-shape function that gives more intensities to the central area of \\(\\mathcal{B}\\). Let \\(\\mathcal{B}\\) denote the rectangle region of size \\(H_z / b \\times W_z / b\\) representing the template region. 
If we simulate the Cox process within \\(\\mathcal{B}\\) and denote a resulted point pattern by \\(\\Xi\\), we can obtain a matrix \\(\\mathbf{b}' = (b_{i,j}')_{1 \\leqslant i \\leqslant H_z / b, 1 \\leqslant i \\leqslant W_z / b}\\), where \\(b_{i,j}'\\) equals 1 if \\((i, j) \\in \\Xi\\), and 0 otherwise, with which our \\(\\mathfrak{m}_{\\mathbb{C}}\\) can be defined as \\(\\mathfrak{m}_{\\mathbb{C}}(Z) = Z \\odot (\\mathbf{b}' \\otimes \\mathbf{1})\\). It is worthy of note that if \\(\\varsigma = [(1 - \\sigma)H_zW_z]\\), since \\(\\mathbb{E}[\\Lambda(\\mathcal{B})] = \\mathbb{E}[\\int_{\\mathcal{B}} \\lambda(x, y) dxdy] = \\mathbb{E}[\\Gamma] = \\varsigma\\), in this case, the expected masking ratio of our masking operation is equal to the masking ratio of \\(\\mathfrak{m}_{\\mathbb{C}}\\). Thus, in addition to inhomogeneous intensity, our method can simulate more diverse pattern of occlusion due to the introduced randomness of the masking ratio." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.908, + 0.902 + ], + "angle": 0, + "content": "We denote the total number of tokens by \\(\\mathcal{K}\\), the embedding dimension of each token by \\(d\\), and all the tokens out" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.091, + 0.485, + 0.306 + ], + "angle": 0, + "content": "put by the \\(L\\)-th layer of \\(\\mathfrak{B}_T\\) with respect to inputs \\(X\\) and \\(Z\\) by \\(\\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)\\in \\mathbb{R}^{\\mathcal{K}\\times d}\\). Let \\(\\mathbf{t}_{\\mathcal{K}_Z\\cup \\mathcal{K}_X}^L (Z,X;\\mathfrak{B}_T) = \\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)\\), where \\(\\mathcal{K}_Z\\cup \\mathcal{K}_X = [1,\\mathcal{K}]\\), \\(\\mathbf{t}_{\\mathcal{K}_Z}^L\\) and \\(\\mathbf{t}_{\\mathcal{K}_X}^L\\) represent the tokens corresponding to the template and the search image, respectively. 
By the same token, the output tokens corresponding to inputs \\(X\\) and \\(Z'\\) are \\(\\mathbf{t}_{1:\\mathcal{K}}^L (Z',X;\\mathfrak{B}_T)\\). The feature representations of \\(Z\\) and \\(Z'\\) can be recovered by tracking their token indices in respective ordered sequences, which specifically are \\(t_{1:\\mathcal{K}_z}^L (Z,X;\\mathfrak{B}_T)\\) and \\(t_{1:\\mathcal{K}_z}^L (Z',X;\\mathfrak{B}_T)\\), respectively. The core idea of our occlusion-robust representations learning is that the mean square error between the feature representation of \\(Z\\) and that of \\(Z'\\) is minimized, which is implemented by minimizing the following MSE loss," + }, + { + "type": "equation", + "bbox": [ + 0.131, + 0.314, + 0.483, + 0.332 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {o r r} = \\left\\| t _ {1: \\mathcal {K} _ {z}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K} _ {z}} ^ {L} \\left(Z ^ {\\prime}, X; \\mathfrak {B} _ {T}\\right) \\right\\| ^ {2}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.34, + 0.484, + 0.416 + ], + "angle": 0, + "content": "During inference, only \\([Z,X]\\) is input to the model without the need for random template masking. Consequently, our method incurs no additional computational cost during inference. Notably, our method is independent of the ViTs used, any efficient ViTs can work in our framework." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.425, + 0.484, + 0.456 + ], + "angle": 0, + "content": "3.3. Adaptive Feature-Based Knowledge Distillation (AFKD)" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.463, + 0.484, + 0.78 + ], + "angle": 0, + "content": "Feature-based knowledge distillation is a technique in machine learning that trains a smaller student model to mimic a larger teacher model, which, instead of focusing only on final outputs, transfers intermediate features or representations from the teacher to the student [26, 78]. 
This method uses the detailed internal representations from the teacher model to improve the student's learning process. However, there is a risk that the student model might overfit to the specific features of the teacher model, rather than generalizing well to new data. This can be particularly problematic if the teacher model has learned spurious correlations in the data. To combat this, we propose adaptively transferring knowledge based on the difficulty of the tracking task. We quantify this difficulty using the deviation of the GIoU loss [67] (see Section 3.4) from its average value, calculated between the student's prediction and the ground truth. Adapting knowledge transfer based on difficulty ensures that the student model doesn't heavily adjust its weights on easy tasks, which it can handle already probably due to its generalizability. Instead, it focuses more on challenging scenarios where its feature representation is less effective." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Additionally, the choice of teacher-student architectures is crucial in knowledge distillation. Given the wide array of possible student models, we adopt a self-similar approach where the student model mirrors the teacher's architecture but employs a smaller ViT backbone, using fewer ViT blocks. This strategy simplifies the design and eliminates the need for additional alignment techniques that would otherwise be necessary due to mismatched feature di" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.228 + ], + "angle": 0, + "content": "mensions. Lastly, layer selection and the metric of feature similarity are also crucial aspects of feature-based knowledge distillation. 
Given MSE's popularity in feature-based knowledge distillation and to avoid potential complexity associated with using multiple layers, we employ MSE to penalize differences between the output feature representations of both the teacher and student model's backbones, i.e., \\( t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{T}) \\) and \\( t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{S}) \\). The proposed adaptive knowledge distillation loss is defined by" + }, + { + "type": "equation", + "bbox": [ + 0.513, + 0.235, + 0.931, + 0.266 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {a f k d} = (\\alpha + \\beta \\left(\\mathcal {L} _ {i o u} - \\overline {{\\mathcal {L} _ {i o u}}}\\right)) \\| t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {S}) \\| ^ {2}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.266, + 0.907, + 0.326 + ], + "angle": 0, + "content": "where \\(\\alpha + \\beta (\\mathcal{L}_{iou} - \\overline{\\mathcal{L}_{iou}}) \\coloneqq \\varpi (\\mathcal{L}_{iou}; \\alpha, \\beta)\\) is a function of the deviation of GIoU loss from its average, with intercept \\(\\alpha\\) and slope \\(\\beta\\), used to quantify the difficulty of the tracking task." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.336, + 0.819, + 0.352 + ], + "angle": 0, + "content": "3.4. Prediction Head and Training Loss" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.359, + 0.909, + 0.617 + ], + "angle": 0, + "content": "Following the corner detection head in [13, 91], we use a prediction head consisting of multiple Conv-BN-ReLU layers to directly estimate the bounding box of the target. The output tokens corresponding to the search image are first reinterpreted to a 2D spatial feature map and then fed into the prediction head. 
The head outputs a local offset \\(\\mathbf{o} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}\\), a normalized bounding box size \\(\\mathbf{s} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}\\), and a target classification score \\(\\mathbf{p} \\in [0,1]^{H_x / P \\times W_x / P}\\) as prediction outcomes. The initial estimation of the target position depends on identifying the location with the highest classification score, i.e., \\((x_c, y_c) = \\operatorname{argmax}_{(x,y)} \\mathbf{p}(x,y)\\). The final target bounding box is estimated by \\(\\{(x_t, y_t); (w,h)\\} = \\{(x_c, y_c) + \\mathbf{o}(x_c, y_c); \\mathbf{s}(x_c, y_c)\\}\\). For the tracking task, we adopt the weighted focal loss [40] for classification, a combination of \\(L_1\\) loss and GIoU loss [67] for bounding box regression. The total loss for tracking prediction is:" + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.626, + 0.905, + 0.643 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p r e d} = \\mathcal {L} _ {c l s} + \\lambda_ {i o u} \\mathcal {L} _ {i o u} + \\lambda_ {L _ {1}} \\mathcal {L} _ {L _ {1}}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.651, + 0.909, + 0.773 + ], + "angle": 0, + "content": "where the constants \\(\\lambda_{iou} = 2\\) and \\(\\lambda_{L_1} = 5\\) are set as in [13, 91]. The overall loss \\(\\mathcal{L}_T = \\mathcal{L}_{pred} + \\gamma \\mathcal{L}_{orr}\\) is used to train the teacher end-to-end after loading the pretrained weights of the ViT trained with ImageNet [68], where the constant \\(\\gamma\\) is set to \\(2.0 \\times 10^{-4}\\). After this training, we fix the weights of the teacher model, and employ the overall loss \\(\\mathcal{L}_S = \\mathcal{L}_{pred} + \\mathcal{L}_{afkd}\\), for end-to-end knowledge distillation training." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.785, + 0.646, + 0.802 + ], + "angle": 0, + "content": "4. 
Experiments" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.81, + 0.907, + 0.903 + ], + "angle": 0, + "content": "We evaluate our method on four UAV tracking benchmarks: DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. All experiments run on a PC with an i9-10850K processor, 16GB RAM, and an NVIDIA TitanX GPU. We compare our method against 26 state-of-the-art trackers, using their official codes and hyper" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.089, + 0.907, + 0.131 + ], + "angle": 0, + "content": "Table 1. Precision (Prec.), success rate (Succ.), and speed (FPS) comparison between ORTrack and lightweight trackers on four UAV tracking benchmarks, i.e., DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. Red, blue and green indicate the first, second and third place. Note that the percent symbol (\\%) is omitted for all Prec. and Succ. values." + }, + { + "type": "table", + "bbox": [ + 0.129, + 0.143, + 0.872, + 0.327 + ], + "angle": 0, + "content": "
MethodSourceDTB70UAVDTVisDrone2018UAV123Avg.Avg.FPSFLOPs (GMac)Param. (M)
Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.GPUCPU
DCF-basedKCF [29]TAPMI 1546.828.057.129.068.541.352.333.156.232.9-624.3--
fDSST [16]TPAMI 1753.435.766.638.369.851.058.340.562.041.4-193.4--
ECO_HC [14]CVPR 1763.544.869.441.680.858.171.049.671.248.5-83.5--
AutoTrack [46]CVPR 2071.647.871.845.078.857.368.947.272.849.3-57.8--
RACF [42]PR 2272.650.577.349.483.460.070.247.775.951.8-35.6--
CNN-basedHiFT [4]ICCV 2180.259.465.247.571.952.678.759.074.054.6160.3-7.29.9
TCTrack [5]CVPR 2281.262.272.553.079.959.480.060.578.458.8149.6-8.89.7
SGDViT [90]ICRA 2378.560.465.748.072.152.175.457.572.954.5110.5-11.323.3
DRCI [93]ICME 2381.461.884.059.083.460.076.759.781.460.1281.362.73.68.8
PRL-Track [22]IROSS 2479.560.673.153.572.653.879.159.376.156.8132.3-7.412.0
VIT-basedAba-ViTrack [44]ICCV 2385.966.483.459.986.165.386.466.485.564.5181.550.32.48.0
SMAT [25]WACV 2481.963.880.858.782.563.481.864.681.862.6126.8-3.28.6
AVTrack-DeiT [47]ICML 2484.365.082.158.786.065.384.866.884.263.8260.359.80.97-1.93.5-7.9
ORTrack-DeiTOurs86.266.483.460.188.666.884.366.485.665.0226.455.42.47.9
ORTrack-D-DeiT83.765.182.559.784.663.984.066.183.763.7292.364.71.55.3
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.352, + 0.483, + 0.412 + ], + "angle": 0, + "content": "parameters. We evaluate our approach against 13 state-of-the-art (SOTA) lightweight trackers (see Table 1) and 14 SOTA deep trackers designed specifically for generic visual tracking (refer to Table 2)." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.421, + 0.308, + 0.437 + ], + "angle": 0, + "content": "4.1. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.445, + 0.483, + 0.656 + ], + "angle": 0, + "content": "We adopt different ViTs as backbones, including ViT-tiny [18], Eva-tiny [21], and DeiT-tiny [73], to build three trackers for evaluation: ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT. The head of ORTrack consists of a stack of four Conv-BN-ReLU layers. The search region and template sizes are set to \\(256 \\times 256\\) and \\(128 \\times 128\\), respectively. A combination of training sets from GOT-10k [30], LaSOT [20], COCO [48], and TrackingNet [56] is used for the training. The batch size is set to 32. We employ the AdamW optimizer [50], with a weight decay of \\(10^{-4}\\) and an initial learning rate of \\(4 \\times 10^{-5}\\). The training is conducted over 300 epochs, with 60,000 image pairs processed in each epoch. The learning rate is reduced by a factor of 10 after 240 epochs." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.666, + 0.345, + 0.683 + ], + "angle": 0, + "content": "4.2. State-of-the-art Comparison" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.689, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Comparison with Lightweight Trackers. The overall performance of our ORTrack in comparison to 13 competing trackers on the four benchmarks is displayed in Table 1. As can be seen, our trackers demonstrate superior performance among all these trackers in terms of average (Avg.) precision (Prec.), success rate (Succ.) and speeds. On average, RACF [42] demonstrated the highest Prec. 
\\((75.9\\%)\\) and Succ. \\((51.8\\%)\\) among DCF-based trackers, while DRCI [93] achieves the highest precision and success rates, with \\(81.4\\%\\) and \\(60.1\\%\\), respectively, among CNN-based trackers. However, the average Prec. and Succ. of all our trackers are greater than \\(82.0\\%\\) and \\(62.0\\%\\), respectively, clearly surpassing DCF- and CNN-based approaches. Additionally, our ORTrack-DeiT achieves the highest Avg. Prec. and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.352, + 0.907, + 0.683 + ], + "angle": 0, + "content": "Avg. Succ. of \\(85.6\\%\\) and \\(65.0\\%\\), respectively, among all competing trackers. Although Aba-ViTrack achieves performance close to our ORTrack-DeiT, its GPU speed is significantly lower, with a \\(23.6\\%\\) relative gap. Notably, when the proposed adaptive knowledge distillation is applied to ORTrack-DeiT, the resulting student model, ORTrack-D-DeiT, shows a significant speed increase: \\(29.1\\%\\) on GPU and \\(16.8\\%\\) on CPU. This improvement is accompanied by a minimal reduction in accuracy, with only a \\(1.9\\%\\) decrease in Avg. Prec. and a \\(1.3\\%\\) decrease in Avg. Succ.. All proposed trackers can run in real-time on a single \\(\\mathbf{CPU}^*\\) and our ORTrack-DeiT sets a new performance record for real-time UAV tracking. We also compare the floating point operations (FLOPs) and number of parameters (Params.) of our method with CNN-based and ViT-based trackers in Table 1. Our method demonstrates a relatively lower parameter count and reduced computational complexity compared to these approaches. Notably, since AVTrack-DeiT tracker features adaptive architectures, the FLOPs and parameters range from minimum to maximum values. These results highlight our method's effectiveness and its state-of-the-art performance." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.684, + 0.909, + 0.835 + ], + "angle": 0, + "content": "Comparison with Deep Trackers. 
The proposed ORTrack-DeiT is also compared with 14 SOTA deep trackers in Table 2, which shows precision (Prec.) and GPU speed on VisDrone2018. Our ORTrack-DeiT surpasses all other methods in both metrics, demonstrating its superior accuracy and speed. Although trackers like AQATrack [87], HIPTrack [2], and ROMTrack [3] achieve precision comparable to our ORTrack-DeiT, their GPU speeds are much slower. Specifically, our method is 4, 6, and 4 times faster than AQATrack, HIPTrack, and ROMTrack, respectively." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.835, + 0.907, + 0.866 + ], + "angle": 0, + "content": "Attribute-Based Evaluation. To access our method's robustness against target occlusion, we compare ORTrack-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.513, + 0.875, + 0.907, + 0.9 + ], + "angle": 0, + "content": "*Real-time performance applies to platforms similar to or more advanced than ours." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.12, + 0.09, + 0.877, + 0.105 + ], + "angle": 0, + "content": "Table 2. Precision (Prec.) and speed (FPS) comparison between ORTrack-DeiT and deep-based trackers on VisDrone2018 [98]." + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.116, + 0.885, + 0.187 + ], + "angle": 0, + "content": "
TrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPS
ORTrack-DeiTOurs88.666.8206.2ZoomTrack [38]NIPS 2381.463.461.7SimTrack [8]ECCV 2280.060.969.7
AQATrack [87]CVPR 2487.266.953.4SeqTrack [10]CVPR 2385.365.815.3ToMP [55]CVPR 2284.164.421.4
HIPTrack [2]CVPR 2486.767.131.3MAT [96]CVPR 2381.662.268.4KeepTrack [54]ICCV 2184.063.520.3
EVPTrack [69]AAAI 2484.565.822.1SparseTT [23]IJCAI 2281.462.130.2SAOT [97]ICCV 2176.959.135.4
ROMTrack [3]ICCV 2386.466.751.1OSTrack [91]ECCV 2284.264.862.7PrDiMP50 [15]CVPR 2079.459.742.6
" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.209, + 0.48, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.39, + 0.483, + 0.434 + ], + "angle": 0, + "content": "Figure 3. Attribute-based comparison on the partial occlusion subset of VisDrone2018 [98]. ORTrack-DeiT* refers to ORTrack-DeiT without applying the occlusion-robust enhancement." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.46, + 0.483, + 0.657 + ], + "angle": 0, + "content": "DeiT alongside 16 SOTA trackers on the partial occlusion subset of VisDrone2018. Additionally, we also assess the baseline ORTrack-DeiT*, i.e., ORTrack-DeiT without applying the proposed method for learning Occlusion-Robust Representation (ORR), for comparison. The precision plot are presented in Fig. 3, with additional attribute-based evaluation results provided in the supplemental materials. As observed, ORTrack-DeiT achieves the second-highest precision \\((85.0\\%)\\), just slightly behind the first-ranked tracker AQATrack by \\(0.2\\%\\). Remarkably, incorporating the proposed components leads to a significant improvement over ORTrack-DeiT*, with increases of \\(6.9\\%\\) in Prec., well underscoring the effectiveness of our method." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.668, + 0.245, + 0.685 + ], + "angle": 0, + "content": "4.3. Ablation Study" + }, + { + "type": "table_caption", + "bbox": [ + 0.109, + 0.706, + 0.465, + 0.72 + ], + "angle": 0, + "content": "Table 3. Effect of ORR and AFKD on the baseline trackers." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.732, + 0.46, + 0.855 + ], + "angle": 0, + "content": "
TrackerORRAFKDUAVDTFPS
Prec.Succ.
ORTrack-ViT77.055.6216.2
80.3↑3.358.2↑2.6-
79.1↑2.157.5↑1.9290.3↑34%
ORTrack-Eva78.156.6238.3
80.8↑2.758.7↑2.1-
79.5↑1.457.8↑1.2308.8↑30%
ORTrack-DeiT78.656.7218.4
83.4↑4.860.1↑3.4-
82.5↑3.959.7↑3.0298.7↑36%
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.903 + ], + "angle": 0, + "content": "Effect of Occlusion-Robust Representations (ORR) and Adaptive Feature-Based Knowledge Distillation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.212, + 0.908, + 0.53 + ], + "angle": 0, + "content": "(AFKD). To demonstrate the effectiveness of the proposed ORR and AFKD, Table 3 shows the evaluation results on UAVDT dataset as these components are gradually integrated into the baselines. To avoid potential variations due to randomness, we only present the speed of the baseline, since the GPU speeds of the baseline and its ORR-enhanced version are theoretically identical. As can be seen, the incorporation of ORR significantly enhances both Prec. and Succ. for all baseline trackers. Specifically, the Prec. increases for ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT are \\(3.3\\%\\), \\(2.7\\%\\), and \\(4.8\\%\\), respectively, while the Succ. increases are \\(2.6\\%\\), \\(2.1\\%\\), and \\(3.1\\%\\), respectively. These significant enhancements highlight the effectiveness of ORR in improving tracking precision. The further integration of AFKD results in consistent improvements in GPU speeds, with only slight reductions in Prec. and Succ. Specifically, all baseline trackers experience GPU speed enhancements of over \\(30.0\\%\\), with ORTrack-DeiT showing an impressive \\(36.0\\%\\) improvement. These results affirm the effectiveness of AFKD in optimizing tracking efficiency while maintaining high tracking performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.521, + 0.542, + 0.898, + 0.557 + ], + "angle": 0, + "content": "Table 4. Impact of various Masking Operators on performance." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.568, + 0.902, + 0.66 + ], + "angle": 0, + "content": "
MethodmUmCSAM[37]AdAutoMix[65]CutMix[92]VisDrone2018
Prec.Succ.
ORTrack-DeiT81.662.2
86.765.4
88.666.8
86.865.6
84.363.8
85.764.2
" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.675, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Effect of Masking Operators. To demonstrate the superiority of the proposed masking operator in terms of performance, we evaluate ORTrack-DeiT with various implementations of masking operators (i.e., \\(\\mathfrak{m}_{\\mathrm{U}}\\), \\(\\mathfrak{m}_{\\mathrm{C}}\\), and SAM [37]) alongside data mixing augmentation methods (i.e., AdAutoMix [65] and CutMix [92]). The evaluation results on VisDrone2018 are presented in Table 4. As shown, although using SAM, AdAutoMix, and CutMix improves performance, the best result achieved with SAM is only comparable to the performance of our \\(\\mathfrak{m}_{\\mathrm{U}}\\) masking operator. When \\(\\mathfrak{m}_{\\mathrm{C}}\\) is applied, the improvements are even more substantial, with increases of \\(7.0\\%\\) and \\(4.6\\%\\), respectively. These results validate the effectiveness of the proposed ORR component and particularly demonstrate the superiority of the masking operator based on spatial Cox processes." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.089, + 0.486, + 0.12 + ], + "angle": 0, + "content": "Table 5. Impact of the adaptive knowledge distillation loss on the generalizability on LaSOT and TrackingNet." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.129, + 0.462, + 0.193 + ], + "angle": 0, + "content": "
MethodKDAFKDLaSOTTrackingNet
AUCPnormPAUCPnormP
ORTrack-DeiT53.760.852.672.877.867.1
54.061.253.273.178.467.4
54.662.654.373.779.168.2
" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.214, + 0.485, + 0.397 + ], + "angle": 0, + "content": "Impact of the Adaptive Knowledge Distillation Loss. To assess the impact of the adaptive knowledge distillation loss on generalizability, we train ORTrack-DeiT using GOT-10K with \\(\\varpi (\\mathcal{L}_{iou};\\alpha ,\\beta)\\) and \\(\\varpi (\\mathcal{L}_{iou};\\alpha ,0)\\) separately, then evaluate them on LaSOT and TrackingNet. The results are shown in Table 5. Note that \\(\\varpi (\\mathcal{L}_{iou};\\alpha ,0)\\) degenerates to a non-adaptive knowledge distillation loss as it becomes a constant. As can be seen, AFKD demonstrates greater performance improvements than KD. For instance, using AFKD results in additional gains of over \\(1.1\\%\\) in \\(P_{norm}\\) and \\(P\\) on LaSOT, demonstrating its superior generalizability." + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.407, + 0.484, + 0.438 + ], + "angle": 0, + "content": "Table 6. Application of our ORR component to three SOTA trackers: ARTrack [81], GRM [24], and DropTrack[82]." + }, + { + "type": "table", + "bbox": [ + 0.113, + 0.447, + 0.465, + 0.541 + ], + "angle": 0, + "content": "
TrackerORRUAVDTVisDrone2018
Prec.Succ.Prec.Succ.
ARTrack[81]77.154.677.759.5
78.5↑1.455.8↑1.279.5↑1.860.8↑1.3
GRM[24]79.057.782.763.4
81.7↑1.759.3↑1.684.8↑2.164.6↑1.2
DropTrack[82]76.955.981.562.7
78.7↑1.857.4↑1.582.8↑1.364.2↑1.5
" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.554, + 0.483, + 0.765 + ], + "angle": 0, + "content": "Application to SOTA trackers. To show the wide applicability of our proposed method, we incorporate the proposed ORR into three existing SOTA trackers: ARTrack [81], GRM [24], and DropTrack [82]. Please note that we replace the model's original backbones with ViT-tiny [18] to reduce training time. As shown in Table 6, incorporating ORR results in significant improvements in both precision and success rates for the three baseline trackers. Specifically, ARTrack, GRM, and DropTrack demonstrate an improvement of more than \\(1.2\\%\\) in both precision and success rate across two datasets. These experimental results demonstrate that the proposed ORR component can be seamlessly integrated into existing tracking frameworks, significantly improving tracking accuracy." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.765, + 0.483, + 0.886 + ], + "angle": 0, + "content": "Qualitative Results. Several qualitative tracking results of ORTrack-DeiT and seven SOTA UAV trackers are shown in Fig. 4. As can be seen, only our tracker successfully tracks the targets in all challenging examples, where pose variations, background clusters, and scale variations are presented. In these cases, our method performs significantly better and is more visually appealing, bolstering the effectiveness of the proposed method for UAV tracking." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.886, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Figure 5 shows attention and feature maps produced by" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.09, + 0.905, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.287, + 0.908, + 0.331 + ], + "angle": 0, + "content": "Figure 4. 
Qualitative evaluation on 3 video sequences from, respectively, UAV123 [57], UAVDT [19], and VisDrone2018 [98] (i.e., person9, S1607, and uav0000180_00050_s)." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.343, + 0.905, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.464, + 0.908, + 0.537 + ], + "angle": 0, + "content": "Figure 5. Visualize the attention map (left) and feature map (right) of the target images. The first row displays the search and masked images with masking ratios of \\(0\\%\\), \\(10\\%\\), \\(30\\%\\), and \\(70\\%\\). The second and third rows show the attention and feature maps generated by ORTrack-DeiT, with and without ORR, respectively." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.542, + 0.909, + 0.62 + ], + "angle": 0, + "content": "ORTrack-DeiT, with and without occlusion-robust enhancement. We observe that ORTrack-DeiT with ORR maintains a clearer focus on the targets and exhibits more consistent feature maps across masking ratios. These results support the effectiveness of our ORR component." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.631, + 0.634, + 0.647 + ], + "angle": 0, + "content": "5. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.657, + 0.907, + 0.837 + ], + "angle": 0, + "content": "In view of the common challenges posed by target occlusion in UAV tracking, in this work, we proposed to learn Occlusion-Robust Representation (ORR) by imposing an invariance of feature representation of the target with respect to random masking modeled by a spatial Cox process. Moreover, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) to enhance efficiency. Our approach is notably straightforward and can be easily integrated into other tracking frameworks. Extensive experiments across multiple UAV tracking benchmarks validate the effectiveness of our method, demonstrating that our ORTrack-DeiT achieves SOTA performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.838, + 0.909, + 0.899 + ], + "angle": 0, + "content": "Acknowledgments. This work was funded by the Guangxi Natural Science Foundation (Grant No. 2024GXNSFAA010484), and the National Natural Science Foundation of China (Nos. 62466013, 62206123)." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.116, + 0.484, + 0.19 + ], + "angle": 0, + "content": "[1] Wesam A. Askar, Osama Elmowafy, Anca L. Ralescu, Aliaa Abdel-Halim Youssif, and Gamal A. Elnashar. Occlusion detection and processing using optical flow and particle filter. Int. J. Adv. Intell. Paradigms, 15:63-76, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.193, + 0.484, + 0.239 + ], + "angle": 0, + "content": "[2] Wenrui Cai, Qingjie Liu, and Yunhong Wang. Hiptrack: Visual tracking with historical prompts. In CVPR, pages 19258-19267, 2024. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.241, + 0.483, + 0.287 + ], + "angle": 0, + "content": "[3] Yidong Cai, Jie Liu, Jie Tang, and Gangshan Wu. Robust object modeling for visual tracking. In ICCV, pages 9589-9600, 2023. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.289, + 0.483, + 0.348 + ], + "angle": 0, + "content": "[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. Hift: Hierarchical feature transformer for aerial tracking. In ICCV, pages 15457-15466, 2021. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.352, + 0.483, + 0.412 + ], + "angle": 0, + "content": "[5] Ziang Cao, Ziyuan Huang, Liang Pan, Shiwei Zhang, Ziwei Liu, and Changhong Fu. Ttrack: Temporal contexts for aerial tracking. In CVPR, pages 14798-14808, 2022. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.414, + 0.483, + 0.459 + ], + "angle": 0, + "content": "[6] Satyaki Chakraborty and Martial Hebert. 
Learning to track object position through occlusion. ArXiv, abs/2106.10766, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.462, + 0.483, + 0.507 + ], + "angle": 0, + "content": "[7] T-H Chang and Shaogang Gong. Tracking multiple people with a multi-camera system. In Womot, pages 19-26, 2001. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.51, + 0.483, + 0.569 + ], + "angle": 0, + "content": "[8] Boyu Chen, Peixia Li, Lei Bai, Lei Qiao, and et al. Backbone is all your need: a simplified architecture for visual object tracking. In ECCV, pages 375-392, 2022. 1, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.572, + 0.483, + 0.617 + ], + "angle": 0, + "content": "[9] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and et al. Learning efficient object detection models with knowledge distillation. NIPS, 30, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.62, + 0.483, + 0.679 + ], + "angle": 0, + "content": "[10] Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, and Han Hu. Seqtrack: Sequence to sequence learning for visual object tracking. In CVPR, pages 14572-14581, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.683, + 0.483, + 0.727 + ], + "angle": 0, + "content": "[11] Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.731, + 0.483, + 0.79 + ], + "angle": 0, + "content": "[12] Cheng Chi, Shifeng Zhang, Junliang Xing, Zhen Lei, S. Li, and Xudong Zou. Pedhunter: Occlusion robust pedestrian detector in crowded scenes. ArXiv, abs/1909.06826, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.793, + 0.483, + 0.839 + ], + "angle": 0, + "content": "[13] Yutao Cui, Cheng Jiang, and et al. Mixformer: End-to-end tracking with iterative mixed attention. In CVPR, pages 13608-13618, 2022. 
1, 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.841, + 0.483, + 0.9 + ], + "angle": 0, + "content": "[14] Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, and Michael Felsberg. Eco: Efficient convolution operators for tracking. In CVPR, pages 6638-6646, 2017. 6" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.116, + 0.484, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.136 + ], + "angle": 0, + "content": "[15] Martin Danelljan, Luc Van Gool, and Radu Timofte. Probabilistic regression for visual tracking. In CVPR, pages 7181-7190, 2020. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.139, + 0.905, + 0.182 + ], + "angle": 0, + "content": "[16] Martin Danelljan, Gustav Hager, Fahad Shahbaz Khan, and et al. Discriminative scale space tracking. IEEE TPAMI, 39(8):1561-1575, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.184, + 0.905, + 0.244 + ], + "angle": 0, + "content": "[17] Soumen Das, Saroj K. Biswas, and Biswajit Purkayastha. Occlusion robust sign language recognition system for indian sign language using cnn and pose features. Multimed. Tools. Appl, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.246, + 0.905, + 0.304 + ], + "angle": 0, + "content": "[18] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, and et al. An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv, abs/2010.11929, 2020. 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.905, + 0.351 + ], + "angle": 0, + "content": "[19] Dawei Du, Yuankai Qi, Hongyang Yu, and et al. The unmanned aerial vehicle benchmark: Object detection and tracking. In ECCV, pages 375-391, 2018. 5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.352, + 0.905, + 0.397 + ], + "angle": 0, + "content": "[20] Heng Fan, Liting Lin, Fan Yang, and et al. Lasot: A high-quality benchmark for large-scale single object tracking. 
In CVPR, pages 5369-5378, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.399, + 0.905, + 0.442 + ], + "angle": 0, + "content": "[21] Yuxin Fang, Quan Sun, Xinggang Wang, and et al. Eva-02: A visual representation for neon genesis. Image and Vision Computing, 149:105171, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.444, + 0.905, + 0.488 + ], + "angle": 0, + "content": "[22] Changhong Fu, Xiang Lei, and et al. Progressive representation learning for real-time UAV tracking. In IROS, pages 5072-5079, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.49, + 0.905, + 0.534 + ], + "angle": 0, + "content": "[23] Zhihong Fu, Zehua Fu, Qingjie Liu, Wenrui Cai, and Yunhong Wang. Sparsett: Visual tracking with sparse transformers. arXiv e-prints, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.536, + 0.905, + 0.58 + ], + "angle": 0, + "content": "[24] Shenyuan Gao, Chunluan Zhou, and Jun Zhang. Generalized relation modeling for transformer tracking. In CVPR, pages 18686-18695, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.582, + 0.905, + 0.626 + ], + "angle": 0, + "content": "[25] Goutam Yelluru Gopal and Maria A Amer. Separable self and mixed attention transformers for efficient object tracking. In WACV, pages 6708-6717, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.628, + 0.905, + 0.671 + ], + "angle": 0, + "content": "[26] Jianping Gou, Baosheng Yu, Stephen J Maybank, and Dacheng Tao. Knowledge distillation: A survey. IJCV, 129(6):1789-1819, 2021. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.673, + 0.905, + 0.717 + ], + "angle": 0, + "content": "[27] Karthik Hariharakrishnan and Dan Schonfeld. Fast object tracking using adaptive block matching. IEEE TMM, 7:853-859, 2005. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.719, + 0.905, + 0.763 + ], + "angle": 0, + "content": "[28] Kaiming He, Xinlei Chen, Saining Xie, and et al. 
Masked autoencoders are scalable vision learners. In CVPR, pages 15979-15988, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.765, + 0.905, + 0.808 + ], + "angle": 0, + "content": "[29] João F. Henriques, Rui Caseiro, Pedro Martins, and et al. High-speed tracking with kernelized correlation filters. IEEE TPAMI, 37:583-596, 2015. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.811, + 0.905, + 0.855 + ], + "angle": 0, + "content": "[30] L. Huang, X. Zhao, and K. Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE TPAMI, (5), 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.857, + 0.905, + 0.902 + ], + "angle": 0, + "content": "[31] Ziyuan Huang, Changhong Fu, and et al. Learning aberrance repressed correlation filters for real-time uav tracking. In ICCV, pages 2891-2900, 2019. 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.902 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.091, + 0.486, + 0.137 + ], + "angle": 0, + "content": "[32] Janine Illian, Antti Penttinen, Helga Stoyan, and Dieterich Stoyan. Statistical analysis and modelling of spatial point patterns. John Wiley & Sons, 2008. 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.139, + 0.483, + 0.183 + ], + "angle": 0, + "content": "[33] Michal Irani and Shmuel Peleg. Motion analysis for image enhancement: Resolution, occlusion, and transparency. JVCIR, 4(4):324-335, 1993. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.186, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[34] Dippal Israni and Hiren K. Mewada. Feature descriptor based identity retention and tracking of players under intense occlusion in soccer videos. Int. J. Intell. Eng. Syst, 2018. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.248, + 0.482, + 0.292 + ], + "angle": 0, + "content": "[35] Minyang Jiang and et al. 
Occlusion-robust fau recognition by mining latent space of masked autoencoders. Neurocomputing, 569:127107, 2024. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.295, + 0.482, + 0.338 + ], + "angle": 0, + "content": "[36] Jung Uk Kim, Ju Won Kwon, and et al. BBC net: Bounding-box critic network for occlusion-robust object detection. IEEE TCSVT, 30:1037-1050, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.341, + 0.482, + 0.371 + ], + "angle": 0, + "content": "[37] Alexander Kirillov, Eric Mintun, and et al. Segment anything. In ICCV, pages 4015-4026, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.373, + 0.482, + 0.417 + ], + "angle": 0, + "content": "[38] Yutong Kou, Jin Gao, Bing Li, and et al. Zoomtrack: Target-aware non-uniform resizing for efficient visual tracking. NIPS, 36:50959-50977, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.42, + 0.482, + 0.464 + ], + "angle": 0, + "content": "[39] Thijs P. Kuipers, Devanshu Arya, and Deepak K. Gupta. Hard occlusions in visual object tracking. In ECCV Workshops, 2020. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.467, + 0.482, + 0.496 + ], + "angle": 0, + "content": "[40] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. IJCV, 128:642-656, 2018. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.499, + 0.482, + 0.543 + ], + "angle": 0, + "content": "[41] Luming Li, Chenglizhao Chen, and Xiaowei Zhang. Mask-guided self-distillation for visual tracking. In ICME, pages 1-6, 2022. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.546, + 0.482, + 0.605 + ], + "angle": 0, + "content": "[42] Shuiwang Li, Yuting Liu, Qijun Zhao, and Ziliang Feng. Learning residue-aware correlation filters and refining scale for real-time uav tracking. Pattern Recognition, 127:108614, 2022. 
1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.607, + 0.482, + 0.651 + ], + "angle": 0, + "content": "[43] Shuiwang Li, Xiangyang Yang, and et al. Learning target-aware vision transformers for real-time uav tracking. IEEE TGRS, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.654, + 0.482, + 0.713 + ], + "angle": 0, + "content": "[44] Shuiwang Li, Yangxiang Yang, Dan Zeng, and Xucheng Wang. Adaptive and background-aware vision transformer for real-time uav tracking. In ICCV, pages 13943-13954, 2023. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.716, + 0.482, + 0.76 + ], + "angle": 0, + "content": "[45] Siyi Li and D. Y. Yeung. Visual object tracking for unmanned aerial vehicles: A benchmark and new motion models. In AAAI, 2017. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.763, + 0.482, + 0.822 + ], + "angle": 0, + "content": "[46] Yiming Li, Changhong Fu, Fangqiang Ding, and et al. Autotrack: Towards high-performance visual tracking for uav with automatic spatio-temporal regularization. In CVPR, pages 11920-11929, 2020. 1, 2, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.825, + 0.482, + 0.869 + ], + "angle": 0, + "content": "[47] Yongxin Li, Mengyuan Liu, You Wu, and et al. Learning adaptive and view-invariant vision transformer for real-time uav tracking. In ICML, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.871, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[48] Tsung Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, 2014. 6" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.091, + 0.486, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.091, + 0.905, + 0.137 + ], + "angle": 0, + "content": "[49] Mengyuan Liu, Yuelong Wang, and et al. Global filter pruning with self-attention for real-time uav tracking. In BMVC, page 861, 2022. 
1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.139, + 0.905, + 0.183 + ], + "angle": 0, + "content": "[50] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.186, + 0.905, + 0.228 + ], + "angle": 0, + "content": "[51] David G Lowe. Object recognition from local scale-invariant features. In ICCV, pages 1150-1157, 1999. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.232, + 0.905, + 0.277 + ], + "angle": 0, + "content": "[52] Siyu Ma, Yuting Liu, and et al. Learning disentangled representation in pruning for real-time uav tracking. In ACML, pages 690-705, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.279, + 0.905, + 0.322 + ], + "angle": 0, + "content": "[53] Torsten Mattfeldt. Stochastic geometry and its applications. Journal of Microscopy, 183:257-257, 1996. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.326, + 0.905, + 0.37 + ], + "angle": 0, + "content": "[54] Christoph Mayer, Martin Danelljan, and et al. Learning target candidate association to keep track of what not to track. In ICCV, pages 13424-13434, 2021. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.372, + 0.905, + 0.416 + ], + "angle": 0, + "content": "[55] Christoph Mayer, Martin Danelljan, and et al. Transforming model prediction for tracking. In CVPR, pages 8721-8730, 2022. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.419, + 0.905, + 0.463 + ], + "angle": 0, + "content": "[56] Matthias Mueller, Adel Bibi, and et al. Trackingnet: A large-scale dataset and benchmark for object tracking in the wild. In ECCV, pages 300-317, 2018. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.466, + 0.905, + 0.51 + ], + "angle": 0, + "content": "[57] Matthias Mueller, Neil G. Smith, and Bernard Ghanem. A benchmark and simulator for uav tracking. In ECCV, 2016. 
5, 6, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.512, + 0.905, + 0.556 + ], + "angle": 0, + "content": "[58] Hieu Tat Nguyen and Arnold W. M. Smeulders. Fast occluded object tracking by a robust appearance filter. IEEE TPAMI, 26:1099-1104, 2004. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.559, + 0.905, + 0.603 + ], + "angle": 0, + "content": "[59] Hieu Tat Nguyen, Marcel Worring, and Rein van den Boomgaard. Occlusion robust adaptive template tracking. In ICCV, pages 678-683, 2001. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.606, + 0.899, + 0.62 + ], + "angle": 0, + "content": "[60] Toby C. O’Neil. Geometric measure theory. 2002. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.623, + 0.905, + 0.651 + ], + "angle": 0, + "content": "[61] Jiyan Pan and Bo Hu. Robust occlusion handling in object tracking. In CVPR, pages 1-8, 2007. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.654, + 0.905, + 0.698 + ], + "angle": 0, + "content": "[62] Joo Hyun Park, Yeong Min Oh, and et al. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In CVPR, pages 1486–1495, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.7, + 0.905, + 0.73 + ], + "angle": 0, + "content": "[63] Wonpyo Park and et al. Relational knowledge distillation. In CVPR, pages 3962-3971, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.732, + 0.905, + 0.776 + ], + "angle": 0, + "content": "[64] Zhimao Peng, Zechao Li, Junge Zhang, and et al. Few-shot image recognition with knowledge transfer. In ICCV, pages 441-449, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.779, + 0.905, + 0.822 + ], + "angle": 0, + "content": "[65] Huafeng Qin, Xin Jin, Yun Jiang, Mounim A El-Yacoubi, and Xinbo Gao. Adversarial automixup. arXiv preprint arXiv:2312.11954, 2023. 
7" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.825, + 0.905, + 0.869 + ], + "angle": 0, + "content": "[66] Delin Qu, Yizhen Lao, and et al. Towards nonlinear-motion-aware and occlusion-robust rolling shutter correction. ICCV, pages 10646–10654, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.871, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[67] Seyed Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, and et al. Generalized intersection over union:" + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.091, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.092, + 0.482, + 0.122 + ], + "angle": 0, + "content": "A metric and a loss for bounding box regression. \nCVPR, pages 658-666, 2019. 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.125, + 0.483, + 0.169 + ], + "angle": 0, + "content": "[68] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, and et al. Imagenet large scale visual recognition challenge. IJCV, 115:211 - 252, 2014. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.173, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[69] Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, and Xianxian Li. Explicit visual prompts for visual object tracking. In AAAI, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.221, + 0.482, + 0.28 + ], + "angle": 0, + "content": "[70] Abhinav Shrivastava, Abhinav Kumar Gupta, and Ross B. Girshick. Training region-based object detectors with online hard example mining. In CVPR, pages 761-769, 2016. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.284, + 0.482, + 0.328 + ], + "angle": 0, + "content": "[71] Markus Storer and et al. Active appearance model fitting under occlusion using fast-robust pca. In VISAPP, pages 129–136, 2009. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.332, + 0.482, + 0.391 + ], + "angle": 0, + "content": "[72] Chen Sun and et al. 
Siamohot: A lightweight dual siamese network for onboard hyperspectral object tracking via joint spatial-spectral knowledge distillation. IEEE TGRS, 61:1-12, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.395, + 0.482, + 0.44 + ], + "angle": 0, + "content": "[73] Hugo Touvron and et al. Training data-efficient image transformers & distillation through attention. In ICML, pages 10347-10357, 2021. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.443, + 0.482, + 0.488 + ], + "angle": 0, + "content": "[74] Wenxuan Tu, Sihang Zhou, and et al. Hierarchically contrastive hard sample mining for graph self-supervised pretraining. IEEE TNNLS, PP, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.491, + 0.482, + 0.534 + ], + "angle": 0, + "content": "[75] Frederick Tung and Greg Mori. Similarity-preserving knowledge distillation. In ICCV, pages 1365-1374, 2019. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.539, + 0.482, + 0.582 + ], + "angle": 0, + "content": "[76] K. Wang and et al. Region attention networks for pose and occlusion robust facial expression recognition. IEEE TIP, 29:4057-4069, 2019. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.587, + 0.482, + 0.631 + ], + "angle": 0, + "content": "[77] Keze Wang and et al. Towards human-machine cooperation: Self-supervised sample mining for object detection. In CVPR, pages 1605-1613, 2018. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.634, + 0.482, + 0.693 + ], + "angle": 0, + "content": "[78] Lin Wang and Kuk-Jin Yoon. Knowledge distillation and student-teacher learning for visual intelligence: A review and new outlooks. IEEE TPAMI, 44:3048-3068, 2020. 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.698, + 0.482, + 0.757 + ], + "angle": 0, + "content": "[79] Xucheng Wang, Xiangyang Yang, and et al. Learning disentangled representation with mutual information maximization for real-time uav tracking. 
In ICME, pages 1331-1336, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.761, + 0.482, + 0.805 + ], + "angle": 0, + "content": "[80] Xucheng Wang, Dan Zeng, Qijun Zhao, and Shuiwang Li. Rank-based filter pruning for real-time uav tracking. In ICME, pages 01-06, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.809, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[81] Xing Wei, Yifan Bai, and et al. Autoregressive visual tracking. In CVPR, pages 9697-9706, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.842, + 0.482, + 0.9 + ], + "angle": 0, + "content": "[82] Qiangqiang Wu, Tianyu Yang, and et al. Dropmae: Masked autoencoders with spatial-attention dropout for tracking tasks. In CVPR, pages 14561-14571, 2023. 1, 8" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.136 + ], + "angle": 0, + "content": "[83] Wanying Wu, Pengzhi Zhong, and Shuiwang Li. Fisher pruning for real-time uav tracking. In IJCNN, pages 1-7, 2022. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.139, + 0.905, + 0.197 + ], + "angle": 0, + "content": "[84] You Wu, Xucheng Wang, Dan Zeng, and et al. Learning motion blur robust vision transformers with dynamic early exit for real-time uav tracking. arXiv preprint arXiv:2407.05383, 2024.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.2, + 0.905, + 0.243 + ], + "angle": 0, + "content": "[85] Fei Xie, Chunyu Wang, and et al. Learning tracking representations via dual-branch fully transformer networks. In ICCV, pages 2688–2697, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.246, + 0.905, + 0.288 + ], + "angle": 0, + "content": "[86] Fei Xie, Chunyu Wang, Guangting Wang, and et al. Correlation-aware deep tracking. In CVPR, pages 8741-8750, 2022. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.291, + 0.905, + 0.335 + ], + "angle": 0, + "content": "[87] Jinxia Xie and et al. Autoregressive queries for adaptive tracking with spatio-temporal transformers. In CVPR, pages 19300-19309, 2024. 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.337, + 0.905, + 0.381 + ], + "angle": 0, + "content": "[88] Di Yang and et al. Self-supervised video pose representation learning for occlusion- robust action recognition. In AFGR, pages 1-5, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.384, + 0.905, + 0.427 + ], + "angle": 0, + "content": "[89] Xiangyang Yang, Dan Zeng, and et al. Adaptively bypassing vision transformer blocks for efficient visual tracking. Pattern Recognition, 161:111278, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.429, + 0.905, + 0.473 + ], + "angle": 0, + "content": "[90] Liangliang Yao, Changhong Fu, and et al. Sgdvit: Saliency-guided dynamic vision transformer for uav tracking. arXiv preprint arXiv:2303.04378, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.476, + 0.905, + 0.519 + ], + "angle": 0, + "content": "[91] Botao Ye, Hong Chang, and et al. Joint feature learning and relation modeling for tracking: A one-stream framework. In ECCV, pages 341-357, 2022. 1, 2, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.521, + 0.905, + 0.578 + ], + "angle": 0, + "content": "[92] Sangdoo Yun, Dongyoon Han, and et al. Cutmix: Regularization strategy to train strong classifiers with localizable features. In ICCV, pages 6023-6032, 2019. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.581, + 0.905, + 0.641 + ], + "angle": 0, + "content": "[93] Dan Zeng, Mingliang Zou, Xucheng Wang, and Shuiwang Li. Towards discriminative representations with contrastive instances for real-time uav tracking. In ICME, pages 1349-1354, 2023. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.643, + 0.905, + 0.687 + ], + "angle": 0, + "content": "[94] Chenyuan Zhang, Jiu Xu, and et al. A klt-based approach for occlusion handling in human tracking. In PCS, pages 337-340, 2012. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.689, + 0.905, + 0.733 + ], + "angle": 0, + "content": "[95] Yi Zhang, Pengliang Ji, and et al. 3d-aware neural body fitting for occlusion robust 3d human pose estimation. ICCV, pages 9365-9376, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.735, + 0.905, + 0.792 + ], + "angle": 0, + "content": "[96] Haojie Zhao, Dong Wang, and Huchuan Lu. Representation learning for visual object tracking by masked appearance transfer. In CVPR, pages 18696-18705, 2023. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.795, + 0.905, + 0.839 + ], + "angle": 0, + "content": "[97]Zikun Zhou, Wenjie Pei, Xin Li, and et al. Saliencyassociated object tracking. In ICCV, pages 9846- 9855,2021.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.841, + 0.905, + 0.9 + ], + "angle": 0, + "content": "[98] Pengfei Zhu, Longyin Wen, and et al. Visdrone-vdt2018: The vision meets drone video detection and tracking challenge results. In ECCV Workshops, 2018. 
5, 6, 7, 8" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.9 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_origin.pdf b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4b6065a2449489a36992a2b2300cd72b6b67c72b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/4d91038b-08d1-447d-8037-8ee551148a4c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef6536aa9b59396c2fa6012a764cf84dd82ab6786b50ddcf2e717a6b64b5868d +size 7355895 diff --git a/data/2025/2504_09xxx/2504.09228/full.md b/data/2025/2504_09xxx/2504.09228/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4a466d053bbb9f7a7c6014188a4055db874e84f3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/full.md @@ -0,0 +1,300 @@ +# Learning Occlusion-Robust Vision Transformers for Real-Time UAV Tracking + +You Wu $^{1\dagger}$ , Xucheng Wang $^{2\dagger}$ , Xiangyang Yang $^{1}$ , Mengyuan Liu $^{1}$ , Dan Zeng $^{3}$ , Hengzhou Ye $^{1}$ , Shuiwang Li $^{1*}$ + +1College of Computer Science and Engineering, Guilin University of Technology, China + +$^{2}$ School of Computer Science, Fudan University, Shanghai, China + +$^{3}$ School of Artificial Intelligence, Sun Yat-sen University, Zhuhai, China + +wuyou@glut.edu.cn, xcwang317@glut.edu.cn, xyyang317@163.com, mengyuaner1122@foxmail.com, zengd8@mail.sysu.edu.cn, yehengzhou@glut.edu.cn, lishuiwang0721@163.com + +# Abstract + +Single-stream architectures using Vision Transformer (ViT) backbones show great potential for real-time UAV tracking recently. However, frequent occlusions from obstacles like buildings and trees expose a major drawback: these models often lack strategies to handle occlusions effectively. 
New methods are needed to enhance the occlusion resilience of single-stream ViT models in aerial tracking. In this work, we propose to learn Occlusion-Robust Representations (ORR) based on ViTs for UAV tracking by enforcing an invariance of the feature representation of a target with respect to random masking operations modeled by a spatial Cox process. Hopefully, this random masking approximately simulates target occlusions, thereby enabling us to learn ViTs that are robust to target occlusion for UAV tracking. This framework is termed ORTrack. Additionally, to facilitate real-time applications, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to create a more compact tracker, which adaptively mimics the behavior of the teacher model ORTrack according to the task's difficulty. This student model, dubbed ORTrack-D, retains much of ORTrack's performance while offering higher efficiency. Extensive experiments on multiple benchmarks validate the effectiveness of our method, demonstrating its state-of-the-art performance. Codes is available at https://github.com/wuyou3474/ORTrack. + +# 1. Introduction + +Unmanned aerial vehicles (UAVs) are leveraged in a plethora of applications, with increasing emphasis on UAV tracking [4, 43, 46, 49, 52, 79, 84]. This form of tracking poses an exclusive set of challenges such as tricky viewing angles, motion blur, severe occlusions, and the need for + +![](images/24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg) +Figure 1. Compared to SOTA UAV trackers on UAVDT, our ORTrack-DeiT sets a new record with $83.4\%$ precision and a speed of 236 FPS. Our ORTrack-D-DeiT strikes a better trade-off with $82.5\%$ precision and a speed of about 313 FPS. + +efficiency due to UAVs' restricted battery life and computational resources [5, 42, 80, 83]. Consequently, designing an effective UAV tracker requires a delicate balance between precision and efficiency. 
It needs to ensure accuracy while being conscious of the UAV's energy and computational constraints. + +In recent years, there has been a notable shift from discriminative correlation filters (DCF)-based methods, because of their unsatisfactory robustness, towards DL-based approaches, particularly with the adoption of single-stream architectures that integrate feature extraction and fusion via pre-trained Vision Transformer (ViT) backbone networks. This single-stream paradigm has proven highly effective in generic visual tracking, as evidenced by the success of recent methods such as OSTrack [91], SimTrack [8], Mixformer [13], and DropMAE [82]. Building on these advancements, Aba-VTrack [44] introduces a lightweight DL-based tracker within this framework, employing an adap + +tive and background-aware token computation method to enhance inference speed, which demonstrates remarkable precision and speed for real-time UAV tracking. However, the use of a variable number of tokens in Aba-VTrack incurs significant time costs, primarily due to the unstructured access operations required during inference. Adding to this, it also grappled with establishing robustness when facing target occlusion, a challenge common in UAV tracking often triggered by obstructive elements like buildings, mountains, trees, and so forth. The problem is exacerbated by the fact that UAVs may not always be capable of circumventing these impediments due to potential large-scale movements involved. + +To address these issues, we introduce a novel framework designed to enhance the occlusion robustness of ViTs for UAV tracking. Our approach, termed ORTrack, aims to learn ViT-based trackers that maintain robust feature representations even in the presence of target occlusion. This is achieved by enforcing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process. 
The random masking serves as a simulation of target occlusion, which is expected to mimic real occlusion challenges in UAV tracking and aid in learning Occlusion-Robust Representations (ORR). Notably, our method for learning occlusion-robust representation simply uses a Mean Squared Error (MSE) loss during training, adding no extra computational load during inference. Additionally, to enhance efficiency for real-time applications, we introduce an Adaptive Feature-Based Knowledge Distillation (AFKD) method. This method creates a more compact tracker, named ORTrack-D, which adaptively mimics the behavior of the teacher model ORTrack based on the complexity of the tracking task during training. The reasoning is that the teacher model, in its pursuit of powerful representations, may compromise its generalizability. Hence, in situations where generalizability is vital, the student model may perform better, and closely mimicking the teacher's behavior becomes less important. We use the deviation of GIoU loss [67] from its average value to quantify the difficulty of the tracking task, which makes sense as loss value is a commonly used criteria to define hard samples [70, 74, 77]. ORTrack-D maintains much of ORTrack's performance with higher efficiency, making it better suited for deployment in resource-constrained environments typical of UAV applications. Extensive experiments on four benchmarks show that our method achieves state-of-the-art performance. 
+ +In summary, our contributions are as follows: (i) We propose to learn Occlusion-Robust Representations (ORR) by imposing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process, which can be easily integrated into other tracking frameworks without requiring additional ar + +chitectures or increasing inference time; (ii) We propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to further enhance efficiency, in which the student model adaptively mimics the behavior of the teacher model according to the task's difficulty, resulting in a significant increase in tracking speed while only minimally reducing accuracy; (iii) We introduce ORTrack, a family of efficient trackers based on these components, which integrates seamlessly with other ViT-based trackers. ORTrack demonstrates superior performance while maintaining extremely fast tracking speeds. Extensive evaluations show that ORTrack achieves state-of-the-art real-time performance. + +# 2. Related work + +# 2.1. Visual Tracking. + +In visual tracking, the primary approaches consist of DCF-based and DL-based trackers. DCF-based trackers are favored for UAV tracking due to their remarkable efficiency, but they face difficulties in maintaining robustness under complex conditions [31, 42, 46]. Recently developed lightweight DL-based trackers have improved tracking precision and robustness for UAV tracking [4, 5]; however, their efficiency lags behind that of most DCF-based trackers. Model compression techniques like those in [80, 83] have been used to further boost efficiency, yet these trackers still face issues with tracking precision. Vision Transformers (ViTs) are gaining traction for streamlining and unifying frameworks in visual tracking, as seen in studies like [13, 85, 86, 89, 91]. While these frameworks are compact and efficient, few are based on lightweight ViTs, making them impractical for real-time UAV tracking. 
To address this, Aba-ViTrack [44] used lightweight ViTs and an adaptive, background-aware token computation method to enhance efficiency for real-time UAV tracking. However, the variable token number in this approach necessitates unstructured access operations, leading to significant time costs. In this work, we aim to improve the efficiency of ViTs for UAV tracking through knowledge distillation, a more structured method. + +# 2.2. Occlusion-Robust Feature Representation. + +Occlusion-robust feature representation is crucial in computer vision and image processing. It involves developing methods that can recognize and process objects in images even when parts are hidden or occluded [62, 76]. Early efforts often relied on handcrafted features, active appearance models, motion analysis, sensor fusion, etc [7, 33, 51, 71]. While effective in some cases, these methods struggled with the complexity and variability of real-world visual data. The advent of deep learning revolutionized the field. Many studies have applied Convolutional Neural Networks (CNNs) and other deep architectures to + +extract occlusion-robust representations [35, 62, 66, 76]. These approaches use deep models to capture complex patterns and variations in visual data, making learned features resilient to occlusions and having proven valuable for many computer vision applications, such as action recognition [17, 88], pose estimation [62, 95], and object detection [12, 36]. The exploration of occlusion-robust representations in visual tracking has also demonstrated great success [1, 6, 27, 34, 39, 58, 59, 61, 94]. However, to our knowledge, there is a dearth of research to explore learning occlusion-robust ViTs particularly in a unified framework for UAV tracking. In this study, we delve into the exploration of learning occlusion-robust feature representations based on ViTs by simulating occlusion challenges using random masking modeled by a spatial Cox process, specifically tailored for UAV tracking. 
This study represents the first use of ViTs for acquiring occlusion-robust feature representations in UAV tracking. + +# 2.3. Knowledge Distillation. + +Knowledge distillation is a technique used to compress models by transferring knowledge from a complex "teacher" model to a simpler "student" model, with the aim of maintaining performance while reducing computational resources and memory usage [63, 75]. It involves various types of knowledge, distillation strategies, and teacher-student architectures, typically falling into three categories: response-based, feature-based, and relation-based distillation [26, 63, 78]. Widely applied in tasks such as image classification [64], object detection [9], and neural machine translation [42], it offers potential to improve the efficiency and even effectiveness of deep learning models. Recently, it has been successfully utilized to enhance the efficiency of DL-based trackers. For instance, Li et al. [41] used mask-guided self-distillation to compress Siamese-based visual trackers. Sun et al. [72] introduced a lightweight dual Siamese tracker for hyperspectral object tracking, using a spatial-spectral knowledge distillation method to learn from a deep tracker. However, these techniques are mainly Siamese-based and tailored to specific tracking frameworks, posing challenges for adaptation to our ViT-based approach. In this study, we propose a simple yet effective feature-based knowledge distillation method, in which the student adaptively replicate the behavior of the teacher based on the complexity of the tracking task during training. + +# 3. Method + +In this section, we first provide a brief overview of our end-to-end tracking framework, named ORTrack, as shown in Figure 2. Then, we introduce the occlusion-robust representation learning based on spatial Cox processes and the method of adaptive knowledge distillation. Finally, we detail the prediction head and training loss. + +# 3.1. 
Overview + 

The proposed ORTrack introduces a novel single-stream tracking framework, featuring a spatial Cox process-based masking for occlusion-robust representation learning and an adaptive feature-based knowledge distillation pipeline. ORTrack consists of two sequential training phases: the teacher model training pipeline for learning occlusion-robust representations, followed by the student training pipeline involving adaptive knowledge distillation. In the teacher model training phase, the input includes a target template $Z \in \mathbb{R}^{3 \times H_z \times W_z}$ of spatial size $H_z \times W_z$ , a randomly masked target template $Z' = \mathfrak{m}(Z)$ , and a search image $X \in \mathbb{R}^{3 \times H_x \times W_x}$ of spatial size $H_x \times W_x$ , where $\mathfrak{m}(\cdot)$ represents the random masking operation that masks out non-overlapping patches of size $b \times b$ with a certain masking ratio $\sigma$ . To achieve occlusion-robust representation with ViTs, we minimize the mean squared error (MSE) between two versions of the template representation: one with random masking and one without. During the training of the student model, the teacher's weights remain fixed while both the teacher and student models receive inputs $Z$ and $X$ . Let $\mathfrak{B}_T$ and $\mathfrak{B}_S$ represent the backbones of the teacher and student, respectively. In our implementation, $\mathfrak{B}_T$ and $\mathfrak{B}_S$ share the same structure of the ViT layer but differ in the number of layers. Feature-based knowledge distillation is used to transfer the knowledge embedded in the teacher model's backbone features to the student model through an adaptive distillation loss. + 

# 3.2. 
Occlusion-Robust Representations (ORR) Based on Spatial Cox Processes + +To begin, we describe two random masking operations used to simulate occlusion challenges: one from MAE [28] and our proposed method based on a Spatial Cox process, denoted by $\mathfrak{m}_{\mathrm{U}}$ and $\mathfrak{m}_{\mathrm{C}}$ , respectively. Although $\mathfrak{m}_{\mathrm{U}}$ allows the model to learn robust representations that are less sensitive to noise or missing information by randomly ignoring certain parts of the input data during training [28], it is less effective when used to simulate occlusion since each spatial position (in the sense of block size) is masked out with equal probability, especially in our situation where the target template generally contains background. To ensure that the target is masked out as expected with higher probabilities at a given masking ratio, thereby making the occlusion simulation more effective, we employ a finite Cox process [32] to model this masking operation, which is detailed as follows. + +Define two associated random matrices $\mathbf{m} = (m_{i,j})$ , $\mathbf{b} = (b_{i,j})$ , $1 \leqslant i \leqslant H_z / b$ , $1 \leqslant j \leqslant W_z / b$ , where $m_{i,j} \sim \mathcal{U}(0,1)$ (i.e., $m_{i,j}$ follows a uniform distribution over the interval [0, 1]), $b_{i,j} \in \{0,1\}$ equals 1 if $m_{i,j} \in \mathrm{TopK}(\mathbf{m}, K)$ , and 0 otherwise. $\mathrm{TopK}(\mathbf{m}, K)$ returns the $K = \lfloor (1 - \sigma)H_zW_z \rfloor$ largest elements from $\mathbf{m}$ , where $\lfloor x \rfloor$ rounds $x$ to + +![](images/4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg) +Figure 2. Overview of the proposed ORTrack framework, which includes separate training pipelines for a teacher and a student model. Note that the spatial Cox process-based masking and occlusion-robust representation learning are applied only in the teacher pipeline. 
Once the teacher is trained, its weights are fixed for training the student model with the proposed adaptive knowledge distillation. + 

the nearest integer. Mathematically, $\mathfrak{m}_{\mathrm{U}}(Z) = Z\odot (\mathbf{b}\otimes \mathbf{1})$ where $\odot$ denotes the Hadamard product and $\otimes$ denotes the tensor product, $\mathbf{1}$ is an all-ones matrix of size $b\times b$ . Before defining $\mathfrak{m}_{\mathrm{C}}$ , we establish core notations relevant to spatial Cox processes. A spatial Cox process extends the concept of spatial inhomogeneous Poisson point processes by incorporating a random intensity function, which, in turn, is defined as a Poisson point process with an intensity determined by a location-dependent function in the underlying space. For Euclidean space $\mathbb{R}^2$ , an inhomogeneous Poisson point process is defined by a locally integrable positive intensity function $\lambda \colon \mathbb{R}^2\to [0,\infty)$ , such that for every bounded region $\mathcal{B}$ the integral $\Lambda (\mathcal{B}) = \int_{\mathcal{B}}\lambda (x,y)\mathrm{d}xdy$ is finite, where $\Lambda (\mathcal{B})$ has the interpretation of being the expected number of points of the Poisson process located in $\mathcal{B}$ , and for every collection of disjoint bounded Borel measurable sets $\mathcal{B}_1,\dots,\mathcal{B}_k$ [60], its number distribution is defined by $\operatorname*{Pr}\{\mathrm{N}(\mathcal{B}_i) = n_i,i = 1,\ldots ,k\} = \prod_{i = 1}^{k}\frac{(\Lambda(\mathcal{B}_i))^{n_i}}{n_i!} e^{-\Lambda (\mathcal{B}_i)}$ , $n_i\in \mathbb{Z}^{0 + }$ , where $\operatorname*{Pr}$ denotes the probability measure, $\mathrm{N}$ indicates the random counting measure such that $\Lambda (\mathcal{B}) = \mathbb{E}[\mathrm{N}(\mathcal{B})]$ , $\mathbb{E}$ is the expectation operator. 
In particular, the conditional distribution of the points in a bounded set $\mathcal{B}$ given that $\mathrm{N}(\mathcal{B}) = n\in \mathbb{Z}^{0 + }$ is not uniform, and $f_{n}(p_{1},\dots,p_{n}) = \prod_{i = 1}^{n}\frac{\lambda(p_{i})}{\Lambda(\mathcal{B})}$ , $p_1,\dots,p_n\in \mathcal{B}$ defines the corresponding location density function of the $n$ points. Since a Cox process can be regarded as the result of a two-stage random mechanism for which it is sometimes termed 'doubly stochastic Poisson process' [32], the finite Cox processes can be simulated in a straightforward way based on the hierarchical nature of the model. Specifically, in the first step, the intensity $\lambda (x,y)$ is generated. In the second step, an in + 

homogeneous Poisson point process is simulated using the generated $\lambda(x, y)$ [32, 53]. The thinning algorithm [11] is used here for simulating inhomogeneous Poisson point processes. It involves simulating a homogeneous Poisson point process with a higher rate than the maximum possible rate of the inhomogeneous process, and then "thinning" out the generated points to match the desired intensity function. + 

In this work, the randomness of the intensity function is modeled by a random variable $\Gamma$ that has a Poisson distribution with expectation of $\varsigma$ , namely, $\operatorname{Pr}\{\Gamma = k\} = \frac{\varsigma^k e^{-\varsigma}}{k!}$ where $k \in \mathbb{Z}^{0+}$ . The intensity function of the inhomogeneous Poisson point process is then given by + 

$$
\lambda (x, y) = \frac {\Gamma e ^ {- (x ^ {2} + y ^ {2})}}{\int_ {\mathcal {B}} e ^ {- (x ^ {2} + y ^ {2})} d x d y}. \tag {1}
$$

Note that $\lambda(x, y)$ is a bell-shaped function that assigns higher intensity to the central area of $\mathcal{B}$ . Let $\mathcal{B}$ denote the rectangle region of size $H_z / b \times W_z / b$ representing the template region. 
If we simulate the Cox process within $\mathcal{B}$ and denote a resulting point pattern by $\Xi$ , we can obtain a matrix $\mathbf{b}' = (b_{i,j}')_{1 \leqslant i \leqslant H_z / b, 1 \leqslant j \leqslant W_z / b}$ , where $b_{i,j}'$ equals 1 if $(i, j) \in \Xi$ , and 0 otherwise, with which our $\mathfrak{m}_{\mathrm{C}}$ can be defined as $\mathfrak{m}_{\mathrm{C}}(Z) = Z \odot (\mathbf{b}' \otimes \mathbf{1})$ . It is worthy of note that if $\varsigma = [(1 - \sigma)H_zW_z]$ , since $\mathbb{E}[\Lambda(\mathcal{B})] = \mathbb{E}[\int_{\mathcal{B}} \lambda(x, y) dxdy] = \mathbb{E}[\Gamma] = \varsigma$ , in this case, the expected masking ratio of our masking operation is equal to the masking ratio of $\mathfrak{m}_{\mathrm{U}}$ . Thus, in addition to inhomogeneous intensity, our method can simulate more diverse patterns of occlusion due to the introduced randomness of the masking ratio. + 

We denote the total number of tokens by $\mathcal{K}$ , the embedding dimension of each token by $d$ , and all the tokens out + 

put by the $L$ -th layer of $\mathfrak{B}_T$ with respect to inputs $X$ and $Z$ by $\mathbf{t}_{1:\mathcal{K}}^L (Z,X;\mathfrak{B}_T)\in \mathbb{R}^{\mathcal{K}\times d}$ . Let $\mathbf{t}_{\mathcal{K}_Z\cup \mathcal{K}_X}^L (Z,X;\mathfrak{B}_T) = \mathbf{t}_{1:\mathcal{K}}^L (Z,X;\mathfrak{B}_T)$ , where $\mathcal{K}_Z\cup \mathcal{K}_X = [1,\mathcal{K}]$ , $\mathbf{t}_{\mathcal{K}_Z}^L$ and $\mathbf{t}_{\mathcal{K}_X}^L$ represent the tokens corresponding to the template and the search image, respectively. By the same token, the output tokens corresponding to inputs $X$ and $Z'$ are $\mathbf{t}_{1:\mathcal{K}}^L (Z',X;\mathfrak{B}_T)$ . The feature representations of $Z$ and $Z'$ can be recovered by tracking their token indices in respective ordered sequences, which specifically are $t_{1:\mathcal{K}_z}^L (Z,X;\mathfrak{B}_T)$ and $t_{1:\mathcal{K}_z}^L (Z',X;\mathfrak{B}_T)$ , respectively. 
The core idea of our occlusion-robust representation learning is that the mean square error between the feature representation of $Z$ and that of $Z'$ is minimized, which is implemented by minimizing the following MSE loss, + 

$$
\mathcal {L} _ {o r r} = \left\| t _ {1: \mathcal {K} _ {z}} ^ {L} (Z, X; \mathfrak {B} _ {T}) - t _ {1: \mathcal {K} _ {z}} ^ {L} \left(Z ^ {\prime}, X; \mathfrak {B} _ {T}\right) \right\| ^ {2}. \tag {2}
$$

During inference, only $[Z,X]$ is input to the model without the need for random template masking. Consequently, our method incurs no additional computational cost during inference. Notably, our method is independent of the ViTs used; any efficient ViTs can work in our framework. + 

# 3.3. Adaptive Feature-Based Knowledge Distillation (AFKD) + 

Feature-based knowledge distillation is a technique in machine learning that trains a smaller student model to mimic a larger teacher model, which, instead of focusing only on final outputs, transfers intermediate features or representations from the teacher to the student [26, 78]. This method uses the detailed internal representations from the teacher model to improve the student's learning process. However, there is a risk that the student model might overfit to the specific features of the teacher model, rather than generalizing well to new data. This can be particularly problematic if the teacher model has learned spurious correlations in the data. To combat this, we propose adaptively transferring knowledge based on the difficulty of the tracking task. We quantify this difficulty using the deviation of the GIoU loss [67] (see Section 3.4) from its average value, calculated between the student's prediction and the ground truth. Adapting knowledge transfer based on difficulty ensures that the student model doesn't heavily adjust its weights on easy tasks, which it can likely already handle due to its generalizability. 
Instead, it focuses more on challenging scenarios where its feature representation is less effective. + 

Additionally, the choice of teacher-student architectures is crucial in knowledge distillation. Given the wide array of possible student models, we adopt a self-similar approach where the student model mirrors the teacher's architecture but employs a smaller ViT backbone, using fewer ViT blocks. This strategy simplifies the design and eliminates the need for additional alignment techniques that would otherwise be necessary due to mismatched feature di + 

mensions. Lastly, layer selection and the metric of feature similarity are also crucial aspects of feature-based knowledge distillation. Given MSE's popularity in feature-based knowledge distillation and to avoid potential complexity associated with using multiple layers, we employ MSE to penalize differences between the output feature representations of both the teacher and student model's backbones, i.e., $t_{1:\mathcal{K}}^{L}(Z,X;\mathfrak{B}_{T})$ and $t_{1:\mathcal{K}}^{L}(Z,X;\mathfrak{B}_{S})$ . The proposed adaptive knowledge distillation loss is defined by + 

$$
\mathcal {L} _ {a f k d} = (\alpha + \beta \left(\mathcal {L} _ {i o u} - \overline {{\mathcal {L} _ {i o u}}}\right)) \| t _ {1: \mathcal {K}} ^ {L} (Z, X; \mathfrak {B} _ {T}) - t _ {1: \mathcal {K}} ^ {L} (Z, X; \mathfrak {B} _ {S}) \| ^ {2}, \tag {3}
$$

where $\alpha + \beta (\mathcal{L}_{iou} - \overline{\mathcal{L}_{iou}}) \coloneqq \varpi (\mathcal{L}_{iou}; \alpha, \beta)$ is a function of the deviation of GIoU loss from its average, with intercept $\alpha$ and slope $\beta$ , used to quantify the difficulty of the tracking task. + 

# 3.4. Prediction Head and Training Loss + 

Following the corner detection head in [13, 91], we use a prediction head consisting of multiple Conv-BN-ReLU layers to directly estimate the bounding box of the target. 
The output tokens corresponding to the search image are first reinterpreted to a 2D spatial feature map and then fed into the prediction head. The head outputs a local offset $\mathbf{o} \in [0,1]^{2 \times H_x / P \times W_x / P}$ , a normalized bounding box size $\mathbf{s} \in [0,1]^{2 \times H_x / P \times W_x / P}$ , and a target classification score $\mathbf{p} \in [0,1]^{H_x / P \times W_x / P}$ as prediction outcomes. The initial estimation of the target position depends on identifying the location with the highest classification score, i.e., $(x_c, y_c) = \operatorname{argmax}_{(x,y)} \mathbf{p}(x,y)$ . The final target bounding box is estimated by $\{(x_t, y_t); (w,h)\} = \{(x_c, y_c) + \mathbf{o}(x_c, y_c); \mathbf{s}(x_c, y_c)\}$ . For the tracking task, we adopt the weighted focal loss [40] for classification, a combination of $L_1$ loss and GIoU loss [67] for bounding box regression. The total loss for tracking prediction is: + +$$ +\mathcal {L} _ {p r e d} = \mathcal {L} _ {c l s} + \lambda_ {i o u} \mathcal {L} _ {i o u} + \lambda_ {L _ {1}} \mathcal {L} _ {L _ {1}}, \tag {4} +$$ + +where the constants $\lambda_{iou} = 2$ and $\lambda_{L_1} = 5$ are set as in [13, 91]. The overall loss $\mathcal{L}_T = \mathcal{L}_{pred} + \gamma \mathcal{L}_{orr}$ is used to train the teacher end-to-end after loading the pretrained weights of the ViT trained with ImageNet [68], where the constant $\gamma$ is set to $2.0 \times 10^{-4}$ . After this training, we fix the weights of the teacher model, and employ the overall loss $\mathcal{L}_S = \mathcal{L}_{pred} + \mathcal{L}_{afkd}$ , for end-to-end knowledge distillation training. + +# 4. Experiments + +We evaluate our method on four UAV tracking benchmarks: DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. All experiments run on a PC with an i9-10850K processor, 16GB RAM, and an NVIDIA TitanX GPU. 
We compare our method against 26 state-of-the-art trackers, using their official codes and hyper + +Table 1. Precision (Prec.), success rate (Succ.), and speed (FPS) comparison between ORTrack and lightweight trackers on four UAV tracking benchmarks, i.e., DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. Red, blue and green indicate the first, second and third place. Note that the percent symbol (\%) is omitted for all Prec. and Succ. values. + +
MethodSourceDTB70UAVDTVisDrone2018UAV123Avg.Avg.FPSFLOPs (GMac)Param. (M)
Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.GPUCPU
DCF-basedKCF [29]TAPMI 1546.828.057.129.068.541.352.333.156.232.9-624.3--
fDSST [16]TPAMI 1753.435.766.638.369.851.058.340.562.041.4-193.4--
ECO_HC [14]CVPR 1763.544.869.441.680.858.171.049.671.248.5-83.5--
AutoTrack [46]CVPR 2071.647.871.845.078.857.368.947.272.849.3-57.8--
RACF [42]PR 2272.650.577.349.483.460.070.247.775.951.8-35.6--
CNN-basedHiFT [4]ICCV 2180.259.465.247.571.952.678.759.074.054.6160.3-7.29.9
TCTrack [5]CVPR 2281.262.272.553.079.959.480.060.578.458.8149.6-8.89.7
SGDViT [90]ICRA 2378.560.465.748.072.152.175.457.572.954.5110.5-11.323.3
DRCI [93]ICME 2381.461.884.059.083.460.076.759.781.460.1281.362.73.68.8
PRL-Track [22]IROSS 2479.560.673.153.572.653.879.159.376.156.8132.3-7.412.0
VIT-basedAba-ViTrack [44]ICCV 2385.966.483.459.986.165.386.466.485.564.5181.550.32.48.0
SMAT [25]WACV 2481.963.880.858.782.563.481.864.681.862.6126.8-3.28.6
AVTrack-DeiT [47]ICML 2484.365.082.158.786.065.384.866.884.263.8260.359.80.97-1.93.5-7.9
ORTrack-DeiTOurs86.266.483.460.188.666.884.366.485.665.0226.455.42.47.9
ORTrack-D-DeiT83.765.182.559.784.663.984.066.183.763.7292.364.71.55.3
+ +parameters. We evaluate our approach against 13 state-of-the-art (SOTA) lightweight trackers (see Table 1) and 14 SOTA deep trackers designed specifically for generic visual tracking (refer to Table 2). + +# 4.1. Implementation Details + +We adopt different ViTs as backbones, including ViT-tiny [18], Eva-tiny [21], and DeiT-tiny [73], to build three trackers for evaluation: ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT. The head of ORTrack consists of a stack of four Conv-BN-ReLU layers. The search region and template sizes are set to $256 \times 256$ and $128 \times 128$ , respectively. A combination of training sets from GOT-10k [30], LaSOT [20], COCO [48], and TrackingNet [56] is used for the training. The batch size is set to 32. We employ the AdamW optimizer [50], with a weight decay of $10^{-4}$ and an initial learning rate of $4 \times 10^{-5}$ . The training is conducted over 300 epochs, with 60,000 image pairs processed in each epoch. The learning rate is reduced by a factor of 10 after 240 epochs. + +# 4.2. State-of-the-art Comparison + +Comparison with Lightweight Trackers. The overall performance of our ORTrack in comparison to 13 competing trackers on the four benchmarks is displayed in Table 1. As can be seen, our trackers demonstrate superior performance among all these trackers in terms of average (Avg.) precision (Prec.), success rate (Succ.) and speeds. On average, RACF [42] demonstrated the highest Prec. $(75.9\%)$ and Succ. $(51.8\%)$ among DCF-based trackers, DRCI [93] achieves the highest precision and success rates, with $81.4\%$ and $60.1\%$ , respectively, among CNN-based trackers. However, the average Prec. and Succ. of all our trackers are greater than $82.0\%$ and $62.0\%$ , respectively, clearly surpassing DCF- and CNN- based approaches. Additionally, our ORTrack-DeiT achieves the highest Avg. Prec. and + +Avg. Succ. of $85.6\%$ and $65.0\%$ , respectively, among all competing trackers. 
Although Aba-ViTrack achieves performance close to our ORTrack-DeiT, its GPU speed is significantly lower, with a $23.6\%$ relative gap. Notably, when the proposed adaptive knowledge distillation is applied to ORTrack-DeiT, the resulting student model, ORTrack-D-DeiT, shows a significant speed increase: $29.1\%$ on GPU and $16.8\%$ on CPU. This improvement is accompanied by a minimal reduction in accuracy, with only a $1.9\%$ decrease in Avg. Prec. and a $1.3\%$ decrease in Avg. Succ. All proposed trackers can run in real-time on a single $\mathbf{CPU}^*$ and our ORTrack-DeiT sets a new performance record for real-time UAV tracking. We also compare the number of floating point operations (FLOPs) and number of parameters (Params.) of our method with CNN-based and ViT-based trackers in Table 1. Our method demonstrates a relatively lower parameter count and reduced computational complexity compared to these approaches. Notably, since the AVTrack-DeiT tracker features adaptive architectures, the FLOPs and parameters range from minimum to maximum values. These results highlight our method's effectiveness and its state-of-the-art performance. + 

Comparison with Deep Trackers. The proposed ORTrack-DeiT is also compared with 14 SOTA deep trackers in Table 2, which shows precision (Prec.) and GPU speed on VisDrone2018. Our ORTrack-DeiT surpasses all other methods in both metrics, demonstrating its superior accuracy and speed. Although trackers like AQATrack [87], HIPTrack [2], and ROMTrack [3] achieve precision comparable to our ORTrack-DeiT, their GPU speeds are much slower. Specifically, our method is 4, 6, and 4 times faster than AQATrack, HIPTrack, and ROMTrack, respectively. + 

Attribute-Based Evaluation. To assess our method's robustness against target occlusion, we compare ORTrack- + 

Table 2. Precision (Prec.) and speed (FPS) comparison between ORTrack-DeiT and deep-based trackers on VisDrone2018 [98]. + 
TrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPS
ORTrack-DeiTOurs88.666.8206.2ZoomTrack [38]NIPS 2381.463.461.7SimTrack [8]ECCV 2280.060.969.7
AQATrack [87]CVPR 2487.266.953.4SeqTrack [10]CVPR 2385.365.815.3ToMP [55]CVPR 2284.164.421.4
HIPTrack [2]CVPR 2486.767.131.3MAT [96]CVPR 2381.662.268.4KeepTrack [54]ICCV 2184.063.520.3
EVPTrack [69]AAAI 2484.565.822.1SparseTT [23]IJCAI 2281.462.130.2SAOT [97]ICCV 2176.959.135.4
ROMTrack [3]ICCV 2386.466.751.1OSTrack [91]ECCV 2284.264.862.7PrDiMP50 [15]CVPR 2079.459.742.6
+ +![](images/ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg) +Figure 3. Attribute-based comparison on the partial occlusion subset of VisDrone2018 [98]. ORTrack-DeiT* refers to ORTrack-DeiT without applying the occlusion-robust enhancement. + +DeiT alongside 16 SOTA trackers on the partial occlusion subset of VisDrone2018. Additionally, we also assess the baseline ORTrack-DeiT*, i.e., ORTrack-DeiT without applying the proposed method for learning Occlusion-Robust Representation (ORR), for comparison. The precision plot are presented in Fig. 3, with additional attribute-based evaluation results provided in the supplemental materials. As observed, ORTrack-DeiT achieves the second-highest precision $(85.0\%)$ , just slightly behind the first-ranked tracker AQATrack by $0.2\%$ . Remarkably, incorporating the proposed components leads to a significant improvement over ORTrack-DeiT*, with increases of $6.9\%$ in Prec., well underscoring the effectiveness of our method. + +# 4.3. Ablation Study + +Table 3. Effect of ORR and AFKD on the baseline trackers. + +
TrackerORRAFKDUAVDTFPS
Prec.Succ.
ORTrack-ViT77.055.6216.2
80.3↑3.358.2↑2.6-
79.1↑2.157.5↑1.9290.3↑34%
ORTrack-Eva78.156.6238.3
80.8↑2.758.7↑2.1-
79.5↑1.457.8↑1.2308.8↑30%
ORTrack-DeiT78.656.7218.4
83.4↑4.860.1↑3.4-
82.5↑3.959.7↑3.0298.7↑36%
+ +Effect of Occlusion-Robust Representations (ORR) and Adaptive Feature-Based Knowledge Distillation + +(AFKD). To demonstrate the effectiveness of the proposed ORR and AFKD, Table 3 shows the evaluation results on UAVDT dataset as these components are gradually integrated into the baselines. To avoid potential variations due to randomness, we only present the speed of the baseline, since the GPU speeds of the baseline and its ORR-enhanced version are theoretically identical. As can be seen, the incorporation of ORR significantly enhances both Prec. and Succ. for all baseline trackers. Specifically, the Prec. increases for ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT are $3.3\%$ , $2.7\%$ , and $4.8\%$ , respectively, while the Succ. increases are $2.6\%$ , $2.1\%$ , and $3.1\%$ , respectively. These significant enhancements highlight the effectiveness of ORR in improving tracking precision. The further integration of AFKD results in consistent improvements in GPU speeds, with only slight reductions in Prec. and Succ. Specifically, all baseline trackers experience GPU speed enhancements of over $30.0\%$ , with ORTrack-DeiT showing an impressive $36.0\%$ improvement. These results affirm the effectiveness of AFKD in optimizing tracking efficiency while maintaining high tracking performance. + +Table 4. Impact of various Masking Operators on performance. + +
MethodmUmCSAM[37]AdAutoMix[65]CutMix[92]VisDrone2018
Prec.Succ.
ORTrack-DeiT81.662.2
86.765.4
88.666.8
86.865.6
84.363.8
85.764.2
+ +Effect of Masking Operators. To demonstrate the superiority of the proposed masking operator in terms of performance, we evaluate ORTrack-DeiT with various implementations of masking operators (i.e., $\mathfrak{m}_{\mathrm{U}}$ , $\mathfrak{m}_{\mathrm{C}}$ , and SAM [37]) alongside data mixing augmentation methods (i.e., AdAutoMix [65] and CutMix [92]). The evaluation results on VisDrone2018 are presented in Table 4. As shown, although using SAM, AdAutoMix, and CutMix improves performance, the best result achieved with SAM is only comparable to the performance of our $\mathfrak{m}_{\mathrm{U}}$ masking operator. When $\mathfrak{m}_{\mathrm{C}}$ is applied, the improvements are even more substantial, with increases of $7.0\%$ and $4.6\%$ , respectively. These results validate the effectiveness of the proposed ORR component and particularly demonstrate the superiority of the masking operator based on spatial Cox processes. + +Table 5. Impact of the adaptive knowledge distillation loss on the generalizability on LaSOT and TrackingNet. + +
MethodKDAFKDLaSOTTrackingNet
AUCPnormPAUCPnormP
ORTrack-DeiT53.760.852.672.877.867.1
54.061.253.273.178.467.4
54.662.654.373.779.168.2
+ +Impact of the Adaptive Knowledge Distillation Loss. To assess the impact of the adaptive knowledge distillation loss on generalizability, we train ORTrack-DeiT using GOT-10K with $\varpi (\mathcal{L}_{iou};\alpha ,\beta)$ and $\varpi (\mathcal{L}_{iou};\alpha ,0)$ separately, then evaluate them on LaSOT and TrackingNet. The results are shown in Table 5. Note that $\varpi (\mathcal{L}_{iou};\alpha ,0)$ degenerates to a non-adaptive knowledge distillation loss as it becomes a constant. As can be seen, AFKD demonstrates greater performance improvements than KD. For instance, using AFKD results in additional gains of over $1.1\%$ in $P_{norm}$ and $P$ on LaSOT, demonstrating its superior generalizability. + +Table 6. Application of our ORR component to three SOTA trackers: ARTrack [81], GRM [24], and DropTrack[82]. + +
TrackerORRUAVDTVisDrone2018
Prec.Succ.Prec.Succ.
ARTrack[81]77.154.677.759.5
78.5↑1.455.8↑1.279.5↑1.860.8↑1.3
GRM[24]79.057.782.763.4
81.7↑1.759.3↑1.684.8↑2.164.6↑1.2
DropTrack[82]76.955.981.562.7
78.7↑1.857.4↑1.582.8↑1.364.2↑1.5
+ +Application to SOTA trackers. To show the wide applicability of our proposed method, we incorporate the proposed ORR into three existing SOTA trackers: ARTrack [81], GRM [24], and DropTrack [82]. Please note that we replace the model's original backbones with ViT-tiny [18] to reduce training time. As shown in Table 6, incorporating ORR results in significant improvements in both precision and success rates for the three baseline trackers. Specifically, ARTrack, GRM, and DropTrack demonstrate an improvement of more than $1.2\%$ in both precision and success rate across two datasets. These experimental results demonstrate that the proposed ORR component can be seamlessly integrated into existing tracking frameworks, significantly improving tracking accuracy. + +Qualitative Results. Several qualitative tracking results of ORTrack-DeiT and seven SOTA UAV trackers are shown in Fig. 4. As can be seen, only our tracker successfully tracks the targets in all challenging examples, where pose variations, background clusters, and scale variations are presented. In these cases, our method performs significantly better and is more visually appealing, bolstering the effectiveness of the proposed method for UAV tracking. + +Figure 5 shows attention and feature maps produced by + +![](images/305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg) +Figure 4. Qualitative evaluation on 3 video sequences from, respectively, UAV123 [57], UAVDT [19], and VisDrone2018 [98] (i.e., person9, S1607, and uav0000180_00050_s). + +![](images/016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg) +Figure 5. Visualize the attention map (left) and feature map (right) of the target images. The first row displays the search and masked images with masking ratios of $0\%$ , $10\%$ , $30\%$ , and $70\%$ . The second and third rows show the attention and feature maps generated by ORTrack-DeiT, with and without ORR, respectively. 
+ +ORTrack-DeiT, with and without occlusion-robust enhancement. We observe that ORTrack-DeiT with ORR maintains a clearer focus on the targets and exhibits more consistent feature maps across masking ratios. These results support the effectiveness of our ORR component. + +# 5. Conclusion + +In view of the common challenges posed by target occlusion in UAV tracking, in this work, we proposed to learn Occlusion-Robust Representation (ORR) by imposing an invariance of feature representation of the target with respect to random masking modeled by a spatial Cox process. Moreover, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) to enhance efficiency. Our approach is notably straightforward and can be easily integrated into other tracking frameworks. Extensive experiments across multiple UAV tracking benchmarks validate the effectiveness of our method, demonstrating that our ORTrack-DeiT achieves SOTA performance. + +Acknowledgments. This work was funded by the Guangxi Natural Science Foundation (Grant No. 2024GXNSFAA010484), and the National Natural Science Foundation of China (Nos. 62466013, 62206123). + +# References + +[1] Wesam A. Askar, Osama Elmowafy, Anca L. Ralescu, Aliaa Abdel-Halim Youssif, and Gamal A. Elnashar. Occlusion detection and processing using optical flow and particle filter. Int. J. Adv. Intell. Paradigms, 15:63-76, 2020. 3 +[2] Wenrui Cai, Qingjie Liu, and Yunhong Wang. Hiptrack: Visual tracking with historical prompts. In CVPR, pages 19258-19267, 2024. 6, 7 +[3] Yidong Cai, Jie Liu, Jie Tang, and Gangshan Wu. Robust object modeling for visual tracking. In ICCV, pages 9589-9600, 2023. 6, 7 +[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. Hift: Hierarchical feature transformer for aerial tracking. In ICCV, pages 15457-15466, 2021. 1, 2, 6 +[5] Ziang Cao, Ziyuan Huang, Liang Pan, Shiwei Zhang, Ziwei Liu, and Changhong Fu. Ttrack: Temporal contexts for aerial tracking. In CVPR, pages 14798-14808, 2022. 
1, 2, 6 +[6] Satyaki Chakraborty and Martial Hebert. Learning to track object position through occlusion. ArXiv, abs/2106.10766, 2021. 3 +[7] T-H Chang and Shaogang Gong. Tracking multiple people with a multi-camera system. In Womot, pages 19-26, 2001. 2 +[8] Boyu Chen, Peixia Li, Lei Bai, Lei Qiao, and et al. Backbone is all your need: a simplified architecture for visual object tracking. In ECCV, pages 375-392, 2022. 1, 7 +[9] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and et al. Learning efficient object detection models with knowledge distillation. NIPS, 30, 2017. 3 +[10] Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, and Han Hu. Seqtrack: Sequence to sequence learning for visual object tracking. In CVPR, pages 14572-14581, 2023. 7 +[11] Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016. 4 +[12] Cheng Chi, Shifeng Zhang, Junliang Xing, Zhen Lei, S. Li, and Xudong Zou. Pedhunter: Occlusion robust pedestrian detector in crowded scenes. ArXiv, abs/1909.06826, 2019. 3 +[13] Yutao Cui, Cheng Jiang, and et al. Mixformer: End-to-end tracking with iterative mixed attention. In CVPR, pages 13608-13618, 2022. 1, 2, 5 +[14] Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, and Michael Felsberg. Eco: Efficient convolution operators for tracking. In CVPR, pages 6638-6646, 2017. 6 + +[15] Martin Danelljan, Luc Van Gool, and Radu Timofte. Probabilistic regression for visual tracking. In CVPR, pages 7181-7190, 2020. 7 +[16] Martin Danelljan, Gustav Hager, Fahad Shahbaz Khan, and et al. Discriminative scale space tracking. IEEE TPAMI, 39(8):1561-1575, 2017. 6 +[17] Soumen Das, Saroj K. Biswas, and Biswajit Purkayastha. Occlusion robust sign language recognition system for indian sign language using cnn and pose features. Multimed. Tools. Appl, 2024. 3 +[18] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, and et al. An image is worth 16x16 words: Transformers for image recognition at scale. 
ArXiv, abs/2010.11929, 2020. 6, 8 +[19] Dawei Du, Yuankai Qi, Hongyang Yu, and et al. The unmanned aerial vehicle benchmark: Object detection and tracking. In ECCV, pages 375-391, 2018. 5, 6, 8 +[20] Heng Fan, Liting Lin, Fan Yang, and et al. Lasot: A high-quality benchmark for large-scale single object tracking. In CVPR, pages 5369-5378, 2018. 6 +[21] Yuxin Fang, Quan Sun, Xinggang Wang, and et al. Eva-02: A visual representation for neon genesis. Image and Vision Computing, 149:105171, 2024. 6 +[22] Changhong Fu, Xiang Lei, and et al. Progressive representation learning for real-time UAV tracking. In IROS, pages 5072-5079, 2024. 6 +[23] Zhihong Fu, Zehua Fu, Qingjie Liu, Wenrui Cai, and Yunhong Wang. Sparsett: Visual tracking with sparse transformers. arXiv e-prints, 2022. 7 +[24] Shenyuan Gao, Chunluan Zhou, and Jun Zhang. Generalized relation modeling for transformer tracking. In CVPR, pages 18686-18695, 2023. 8 +[25] Goutam Yelluru Gopal and Maria A Amer. Separable self and mixed attention transformers for efficient object tracking. In WACV, pages 6708-6717, 2024. 6 +[26] Jianping Gou, Baosheng Yu, Stephen J Maybank, and Dacheng Tao. Knowledge distillation: A survey. IJCV, 129(6):1789-1819, 2021. 3, 5 +[27] Karthik Hariharakrishnan and Dan Schonfeld. Fast object tracking using adaptive block matching. IEEE TMM, 7:853-859, 2005. 3 +[28] Kaiming He, Xinlei Chen, Saining Xie, and et al. Masked autoencoders are scalable vision learners. In CVPR, pages 15979-15988, 2021. 3 +[29] João F. Henriques, Rui Caseiro, Pedro Martins, and et al. High-speed tracking with kernelized correlation filters. IEEE TPAMI, 37:583-596, 2015. 6 +[30] L. Huang, X. Zhao, and K. Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE TPAMI, (5), 2021. 6 +[31] Ziyuan Huang, Changhong Fu, and et al. Learning aberrance repressed correlation filters for real-time uav tracking. In ICCV, pages 2891-2900, 2019. 
2 + +[32] Janine Illian, Antti Penttinen, Helga Stoyan, and Dieterich Stoyan. Statistical analysis and modelling of spatial point patterns. John Wiley & Sons, 2008. 3, 4 +[33] Michal Irani and Shmuel Peleg. Motion analysis for image enhancement: Resolution, occlusion, and transparency. JVCIR, 4(4):324-335, 1993. 2 +[34] Dippal Israni and Hiren K. Mewada. Feature descriptor based identity retention and tracking of players under intense occlusion in soccer videos. Int. J. Intell. Eng. Syst, 2018. 3 +[35] Minyang Jiang and et al. Occlusion-robust fau recognition by mining latent space of masked autoencoders. Neurocomputing, 569:127107, 2024. 3 +[36] Jung Uk Kim, Ju Won Kwon, and et al. BBC net: Bounding-box critic network for occlusion-robust object detection. IEEE TCSVT, 30:1037-1050, 2020. 3 +[37] Alexander Kirillov, Eric Mintun, and et al. Segment anything. In ICCV, pages 4015-4026, 2023. 7 +[38] Yutong Kou, Jin Gao, Bing Li, and et al. Zoomtrack: Target-aware non-uniform resizing for efficient visual tracking. NIPS, 36:50959-50977, 2023. 7 +[39] Thijs P. Kuipers, Devanshu Arya, and Deepak K. Gupta. Hard occlusions in visual object tracking. In ECCV Workshops, 2020. 3 +[40] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. IJCV, 128:642-656, 2018. 5 +[41] Luming Li, Chenglizhao Chen, and Xiaowei Zhang. Mask-guided self-distillation for visual tracking. In ICME, pages 1-6, 2022. 3 +[42] Shuiwang Li, Yuting Liu, Qijun Zhao, and Ziliang Feng. Learning residue-aware correlation filters and refining scale for real-time uav tracking. Pattern Recognition, 127:108614, 2022. 1, 2, 3, 6 +[43] Shuiwang Li, Xiangyang Yang, and et al. Learning target-aware vision transformers for real-time uav tracking. IEEE TGRS, 2024. 1 +[44] Shuiwang Li, Yangxiang Yang, Dan Zeng, and Xucheng Wang. Adaptive and background-aware vision transformer for real-time uav tracking. In ICCV, pages 13943-13954, 2023. 1, 2, 6 +[45] Siyi Li and D. Y. Yeung. 
Visual object tracking for unmanned aerial vehicles: A benchmark and new motion models. In AAAI, 2017. 5, 6 +[46] Yiming Li, Changhong Fu, Fangqiang Ding, and et al. Autotrack: Towards high-performance visual tracking for uav with automatic spatio-temporal regularization. In CVPR, pages 11920-11929, 2020. 1, 2, 6 +[47] Yongxin Li, Mengyuan Liu, You Wu, and et al. Learning adaptive and view-invariant vision transformer for real-time uav tracking. In ICML, 2024. 6 +[48] Tsung Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, 2014. 6 + +[49] Mengyuan Liu, Yuelong Wang, and et al. Global filter pruning with self-attention for real-time uav tracking. In BMVC, page 861, 2022. 1 +[50] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6 +[51] David G Lowe. Object recognition from local scale-invariant features. In ICCV, pages 1150-1157, 1999. 2 +[52] Siyu Ma, Yuting Liu, and et al. Learning disentangled representation in pruning for real-time uav tracking. In ACML, pages 690-705, 2023. 1 +[53] Torsten Mattfeldt. Stochastic geometry and its applications. Journal of Microscopy, 183:257-257, 1996. 4 +[54] Christoph Mayer, Martin Danelljan, and et al. Learning target candidate association to keep track of what not to track. In ICCV, pages 13424-13434, 2021. 7 +[55] Christoph Mayer, Martin Danelljan, and et al. Transforming model prediction for tracking. In CVPR, pages 8721-8730, 2022. 7 +[56] Matthias Mueller, Adel Bibi, and et al. Trackingnet: A large-scale dataset and benchmark for object tracking in the wild. In ECCV, pages 300-317, 2018. 6 +[57] Matthias Mueller, Neil G. Smith, and Bernard Ghanem. A benchmark and simulator for uav tracking. In ECCV, 2016. 5, 6, 8 +[58] Hieu Tat Nguyen and Arnold W. M. Smeulders. Fast occluded object tracking by a robust appearance filter. IEEE TPAMI, 26:1099-1104, 2004. 3 +[59] Hieu Tat Nguyen, Marcel Worring, and Rein van den Boomgaard. 
Occlusion robust adaptive template tracking. In ICCV, pages 678-683, 2001. 3 +[60] Toby C. O’Neil. Geometric measure theory. 2002. 4 +[61] Jiyan Pan and Bo Hu. Robust occlusion handling in object tracking. In CVPR, pages 1-8, 2007. 3 +[62] Joo Hyun Park, Yeong Min Oh, and et al. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In CVPR, pages 1486–1495, 2022. 2, 3 +[63] Wonpyo Park and et al. Relational knowledge distillation. In CVPR, pages 3962-3971, 2019. 3 +[64] Zhimao Peng, Zechao Li, Junge Zhang, and et al. Few-shot image recognition with knowledge transfer. In ICCV, pages 441-449, 2019. 3 +[65] Huafeng Qin, Xin Jin, Yun Jiang, Mounim A El-Yacoubi, and Xinbo Gao. Adversarial automixup. arXiv preprint arXiv:2312.11954, 2023. 7 +[66] Delin Qu, Yizhen Lao, and et al. Towards nonlinear-motion-aware and occlusion-robust rolling shutter correction. ICCV, pages 10646–10654, 2023. 3 +[67] Seyed Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, and et al. Generalized intersection over union: + +A metric and a loss for bounding box regression. +CVPR, pages 658-666, 2019. 2, 5 +[68] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, and et al. Imagenet large scale visual recognition challenge. IJCV, 115:211 - 252, 2014. 5 +[69] Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, and Xianxian Li. Explicit visual prompts for visual object tracking. In AAAI, 2024. 7 +[70] Abhinav Shrivastava, Abhinav Kumar Gupta, and Ross B. Girshick. Training region-based object detectors with online hard example mining. In CVPR, pages 761-769, 2016. 2 +[71] Markus Storer and et al. Active appearance model fitting under occlusion using fast-robust pca. In VISAPP, pages 129–136, 2009. 2 +[72] Chen Sun and et al. Siamohot: A lightweight dual siamese network for onboard hyperspectral object tracking via joint spatial-spectral knowledge distillation. IEEE TGRS, 61:1-12, 2023. 3 +[73] Hugo Touvron and et al. 
Training data-efficient image transformers & distillation through attention. In ICML, pages 10347-10357, 2021. 6 +[74] Wenxuan Tu, Sihang Zhou, and et al. Hierarchically contrastive hard sample mining for graph self-supervised pretraining. IEEE TNNLS, PP, 2023. 2 +[75] Frederick Tung and Greg Mori. Similarity-preserving knowledge distillation. In ICCV, pages 1365-1374, 2019. 3 +[76] K. Wang and et al. Region attention networks for pose and occlusion robust facial expression recognition. IEEE TIP, 29:4057-4069, 2019. 2, 3 +[77] Keze Wang and et al. Towards human-machine cooperation: Self-supervised sample mining for object detection. In CVPR, pages 1605-1613, 2018. 2 +[78] Lin Wang and Kuk-Jin Yoon. Knowledge distillation and student-teacher learning for visual intelligence: A review and new outlooks. IEEE TPAMI, 44:3048-3068, 2020. 3, 5 +[79] Xucheng Wang, Xiangyang Yang, and et al. Learning disentangled representation with mutual information maximization for real-time uav tracking. In ICME, pages 1331-1336, 2023. 1 +[80] Xucheng Wang, Dan Zeng, Qijun Zhao, and Shuiwang Li. Rank-based filter pruning for real-time uav tracking. In ICME, pages 01-06, 2022. 1, 2 +[81] Xing Wei, Yifan Bai, and et al. Autoregressive visual tracking. In CVPR, pages 9697-9706, 2023. 8 +[82] Qiangqiang Wu, Tianyu Yang, and et al. Dropmae: Masked autoencoders with spatial-attention dropout for tracking tasks. In CVPR, pages 14561-14571, 2023. 1, 8 + +[83] Wanying Wu, Pengzhi Zhong, and Shuiwang Li. Fisher pruning for real-time uav tracking. In IJCNN, pages 1-7, 2022. 1, 2 +[84] You Wu, Xucheng Wang, Dan Zeng, and et al. Learning motion blur robust vision transformers with dynamic early exit for real-time uav tracking. arXiv preprint arXiv:2407.05383, 2024.1 +[85] Fei Xie, Chunyu Wang, and et al. Learning tracking representations via dual-branch fully transformer networks. In ICCV, pages 2688–2697, 2021. 2 +[86] Fei Xie, Chunyu Wang, Guangting Wang, and et al. 
Correlation-aware deep tracking. In CVPR, pages 8741-8750, 2022. 2 +[87] Jinxia Xie and et al. Autoregressive queries for adaptive tracking with spatio-temporal transformers. In CVPR, pages 19300-19309, 2024. 6, 7 +[88] Di Yang and et al. Self-supervised video pose representation learning for occlusion- robust action recognition. In AFGR, pages 1-5, 2021. 3 +[89] Xiangyang Yang, Dan Zeng, and et al. Adaptively bypassing vision transformer blocks for efficient visual tracking. Pattern Recognition, 161:111278, 2025. 2 +[90] Liangliang Yao, Changhong Fu, and et al. Sgdvit: Saliency-guided dynamic vision transformer for uav tracking. arXiv preprint arXiv:2303.04378, 2023. 6 +[91] Botao Ye, Hong Chang, and et al. Joint feature learning and relation modeling for tracking: A one-stream framework. In ECCV, pages 341-357, 2022. 1, 2, 5, 7 +[92] Sangdoo Yun, Dongyoon Han, and et al. Cutmix: Regularization strategy to train strong classifiers with localizable features. In ICCV, pages 6023-6032, 2019. 7 +[93] Dan Zeng, Mingliang Zou, Xucheng Wang, and Shuiwang Li. Towards discriminative representations with contrastive instances for real-time uav tracking. In ICME, pages 1349-1354, 2023. 6 +[94] Chenyuan Zhang, Jiu Xu, and et al. A klt-based approach for occlusion handling in human tracking. In PCS, pages 337-340, 2012. 3 +[95] Yi Zhang, Pengliang Ji, and et al. 3d-aware neural body fitting for occlusion robust 3d human pose estimation. ICCV, pages 9365-9376, 2023. 3 +[96] Haojie Zhao, Dong Wang, and Huchuan Lu. Representation learning for visual object tracking by masked appearance transfer. In CVPR, pages 18696-18705, 2023. 7 +[97]Zikun Zhou, Wenjie Pei, Xin Li, and et al. Saliencyassociated object tracking. In ICCV, pages 9846- 9855,2021.7 +[98] Pengfei Zhu, Longyin Wen, and et al. Visdrone-vdt2018: The vision meets drone video detection and tracking challenge results. In ECCV Workshops, 2018. 
5, 6, 7, 8 \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09228/images/016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg b/data/2025/2504_09xxx/2504.09228/images/016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03545ef6a2d727c5987c094067abc358832968dc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30adf9ac87d72dcf755ed706384ef4f0289cae2cba37c94351da0c7d99e71d5f +size 35583 diff --git a/data/2025/2504_09xxx/2504.09228/images/0797ddf346306b391113d1ad5a815add8a24529d2d51fe9d06dc617dc39d1caf.jpg b/data/2025/2504_09xxx/2504.09228/images/0797ddf346306b391113d1ad5a815add8a24529d2d51fe9d06dc617dc39d1caf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..824c1938ecd8f70edaa75e51aee97d0737768a76 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/0797ddf346306b391113d1ad5a815add8a24529d2d51fe9d06dc617dc39d1caf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e588b088ccf1dd54dd49c521223782736256c9973e6872d7728a042882e5e7 +size 18535 diff --git a/data/2025/2504_09xxx/2504.09228/images/24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg b/data/2025/2504_09xxx/2504.09228/images/24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3713a6ef334fc6b0add46104558d56777e5bcb4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:943eb74c290063d49a664e206f22068e3f12e66d8b6cda49d06d7132b14e4d8f +size 36290 diff --git 
a/data/2025/2504_09xxx/2504.09228/images/2b133809dc2006deefd4113c9e26f835295f27716544a7f45f59a3fb96f5f56b.jpg b/data/2025/2504_09xxx/2504.09228/images/2b133809dc2006deefd4113c9e26f835295f27716544a7f45f59a3fb96f5f56b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e67c08a0ecca12588a9f42bf3c8ec31d6403b44c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/2b133809dc2006deefd4113c9e26f835295f27716544a7f45f59a3fb96f5f56b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31349200b97c8157edf9427e835e9960548ea7fc06c16443dc2ef03298665bdf +size 8477 diff --git a/data/2025/2504_09xxx/2504.09228/images/305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg b/data/2025/2504_09xxx/2504.09228/images/305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b68eec8c6d344055469f2a479ee3abc7f2d68be --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df5c4d87bf594c6c8cf65097b27335bac65427c87fcc624dd98f9668dabfec9a +size 63704 diff --git a/data/2025/2504_09xxx/2504.09228/images/30f4f63d5479c9e7c39cc7978d66b2f065cec62132404c0b0f5b0ec73dd8fa3a.jpg b/data/2025/2504_09xxx/2504.09228/images/30f4f63d5479c9e7c39cc7978d66b2f065cec62132404c0b0f5b0ec73dd8fa3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b3228379497a543ade625baca6175cd757e9551 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/30f4f63d5479c9e7c39cc7978d66b2f065cec62132404c0b0f5b0ec73dd8fa3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3ca27ce4dfacae39d07cb3f18b182c9f275de7e89d20e30b93ed6ada5e5446e +size 61275 diff --git a/data/2025/2504_09xxx/2504.09228/images/37fb7df7758917810ca1c85bac13eb3bdf9ab53cfda5b17d8b280d79f44bda26.jpg 
b/data/2025/2504_09xxx/2504.09228/images/37fb7df7758917810ca1c85bac13eb3bdf9ab53cfda5b17d8b280d79f44bda26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..520d08509a3bf4b270cc89fa7401b956308a74e6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/37fb7df7758917810ca1c85bac13eb3bdf9ab53cfda5b17d8b280d79f44bda26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8cd623aac6f8374be2b7ecb51af5f6c97fc9699b6e2e92db0270f8d4fba2c8e +size 5330 diff --git a/data/2025/2504_09xxx/2504.09228/images/3b4f89479c5ff8243e6be20cd787d0178a5864faf1227a509ce683510651c3ac.jpg b/data/2025/2504_09xxx/2504.09228/images/3b4f89479c5ff8243e6be20cd787d0178a5864faf1227a509ce683510651c3ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e7defb45ad85b3a8bdddf20d52901b57ebd192b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/3b4f89479c5ff8243e6be20cd787d0178a5864faf1227a509ce683510651c3ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:958630938eb68d5dbb6a09d7c3b450fc83908f90d085fe26edf448d56b889571 +size 26038 diff --git a/data/2025/2504_09xxx/2504.09228/images/4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg b/data/2025/2504_09xxx/2504.09228/images/4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f15f32081b751ea0ecae73452f4d7cde03dd98c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:239113d292833fb3a42efd7c5e241a2ff3c53f5d9e0acdeb92ebe5b8023e2547 +size 105585 diff --git a/data/2025/2504_09xxx/2504.09228/images/778d6455e7c357455f6bc0b24a76e246ad665f3f59a3cf71913b262d79b1b256.jpg b/data/2025/2504_09xxx/2504.09228/images/778d6455e7c357455f6bc0b24a76e246ad665f3f59a3cf71913b262d79b1b256.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a82dc3c1b35c39fd95eec7bd580be9e41589702e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/778d6455e7c357455f6bc0b24a76e246ad665f3f59a3cf71913b262d79b1b256.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63f42f504d48f373e99448f02dc8ec39c81fdd79689dcae666437c4add657c3c +size 30089 diff --git a/data/2025/2504_09xxx/2504.09228/images/8b76efc3ec6e4fe4cc0bf2f96a3188a5932a30f62326b9f8de3ca91405f2d0d4.jpg b/data/2025/2504_09xxx/2504.09228/images/8b76efc3ec6e4fe4cc0bf2f96a3188a5932a30f62326b9f8de3ca91405f2d0d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cca181c56612c0a1e718a224a0c34472278a0d22 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/8b76efc3ec6e4fe4cc0bf2f96a3188a5932a30f62326b9f8de3ca91405f2d0d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9965afb93b2a9a2ffe9a4a632520d255ed8c9c7815e3fed9430b375e9470a7e4 +size 4181 diff --git a/data/2025/2504_09xxx/2504.09228/images/a32930026e85095da5c264c34b784b8587c0f7197f937d9831316c7c501c4002.jpg b/data/2025/2504_09xxx/2504.09228/images/a32930026e85095da5c264c34b784b8587c0f7197f937d9831316c7c501c4002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e701b1f36d7dc45c800e3280b5ee2faf5758baf7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/a32930026e85095da5c264c34b784b8587c0f7197f937d9831316c7c501c4002.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4829cb2a513cc1c9a0dbe54058751ed84610f4d14194663a5172be71b254fab9 +size 19597 diff --git a/data/2025/2504_09xxx/2504.09228/images/c82e91d033841d1ddd340a9f4b0e674e188bc0ed043c7dd185a55c438f935319.jpg b/data/2025/2504_09xxx/2504.09228/images/c82e91d033841d1ddd340a9f4b0e674e188bc0ed043c7dd185a55c438f935319.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d66d45f6322f2a8c196732f5496ac273686a3093 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09228/images/c82e91d033841d1ddd340a9f4b0e674e188bc0ed043c7dd185a55c438f935319.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a9559e898448f1d909c336aec56d63829335657a338820bf25fcddac34a5c8f +size 5950 diff --git a/data/2025/2504_09xxx/2504.09228/images/e108445084f2db53ba04ebd300c9ae730732181ffe90090330cd8f3bc9c758c5.jpg b/data/2025/2504_09xxx/2504.09228/images/e108445084f2db53ba04ebd300c9ae730732181ffe90090330cd8f3bc9c758c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e36a8144843a905be16402753536d2e327fe281a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/e108445084f2db53ba04ebd300c9ae730732181ffe90090330cd8f3bc9c758c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45bdc235545bf43e45f08743335081cb8b6097af158b28b9241b5e7314e7e59a +size 124869 diff --git a/data/2025/2504_09xxx/2504.09228/images/ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg b/data/2025/2504_09xxx/2504.09228/images/ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c4dc0f3498acba985089a5b0fb4abf1cf7c4150 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/images/ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a3ef0a64153ae5a59cfcb3b77a9fdec43cdd7279c53291806183f3566cacc26 +size 57154 diff --git a/data/2025/2504_09xxx/2504.09228/layout.json b/data/2025/2504_09xxx/2504.09228/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ca2326908a171004dc708c9b5b04191b2a9f27b6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09228/layout.json @@ -0,0 +1,10228 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 64, + 102, + 545, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 102, + 545, + 121 + ], + "spans": [ + { + 
"bbox": [ + 64, + 102, + 545, + 121 + ], + "type": "text", + "content": "Learning Occlusion-Robust Vision Transformers for Real-Time UAV Tracking" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "spans": [ + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": "You Wu" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Xucheng Wang" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Xiangyang Yang" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Mengyuan Liu" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Dan Zeng" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Hengzhou Ye" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "text", + "content": ", Shuiwang Li" + }, + { + "bbox": [ + 147, + 142, + 462, + 171 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 92, + 171, + 517, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 171, + 517, + 185 + ], + "spans": [ + { + "bbox": [ + 92, + 171, + 517, + 185 + ], + 
"type": "text", + "content": "1College of Computer Science and Engineering, Guilin University of Technology, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 146, + 185, + 465, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 185, + 465, + 199 + ], + "spans": [ + { + "bbox": [ + 146, + 185, + 465, + 199 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 146, + 185, + 465, + 199 + ], + "type": "text", + "content": "School of Computer Science, Fudan University, Shanghai, China" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 199, + 480, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 199, + 480, + 213 + ], + "spans": [ + { + "bbox": [ + 129, + 199, + 480, + 213 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 129, + 199, + 480, + 213 + ], + "type": "text", + "content": "School of Artificial Intelligence, Sun Yat-sen University, Zhuhai, China" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 216, + 541, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 216, + 541, + 241 + ], + "spans": [ + { + "bbox": [ + 66, + 216, + 541, + 241 + ], + "type": "text", + "content": "wuyou@glut.edu.cn, xcwang317@glut.edu.cn, xyyang317@163.com, mengyuaner1122@foxmail.com, zengd8@mail.sysu.edu.cn, yehengzhou@glut.edu.cn, lishuiwang0721@163.com" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 151, + 269, + 200, + 280 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 269, + 200, + 280 + ], + "spans": [ + { + "bbox": [ + 151, + 269, + 200, + 280 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 293, + 296, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 293, + 296, + 594 + ], + "spans": [ + { + "bbox": [ + 55, + 293, + 296, + 594 + ], + "type": "text", + "content": "Single-stream architectures 
using Vision Transformer (ViT) backbones show great potential for real-time UAV tracking recently. However, frequent occlusions from obstacles like buildings and trees expose a major drawback: these models often lack strategies to handle occlusions effectively. New methods are needed to enhance the occlusion resilience of single-stream ViT models in aerial tracking. In this work, we propose to learn Occlusion-Robust Representations (ORR) based on ViTs for UAV tracking by enforcing an invariance of the feature representation of a target with respect to random masking operations modeled by a spatial Cox process. Hopefully, this random masking approximately simulates target occlusions, thereby enabling us to learn ViTs that are robust to target occlusion for UAV tracking. This framework is termed ORTrack. Additionally, to facilitate real-time applications, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to create a more compact tracker, which adaptively mimics the behavior of the teacher model ORTrack according to the task's difficulty. This student model, dubbed ORTrack-D, retains much of ORTrack's performance while offering higher efficiency. Extensive experiments on multiple benchmarks validate the effectiveness of our method, demonstrating its state-of-the-art performance. Codes is available at https://github.com/wuyou3474/ORTrack." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 615, + 135, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 615, + 135, + 627 + ], + "spans": [ + { + "bbox": [ + 55, + 615, + 135, + 627 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 296, + 696 + ], + "type": "text", + "content": "Unmanned aerial vehicles (UAVs) are leveraged in a plethora of applications, with increasing emphasis on UAV tracking [4, 43, 46, 49, 52, 79, 84]. This form of tracking poses an exclusive set of challenges such as tricky viewing angles, motion blur, severe occlusions, and the need for" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 316, + 268, + 555, + 413 + ], + "blocks": [ + { + "bbox": [ + 316, + 268, + 555, + 413 + ], + "lines": [ + { + "bbox": [ + 316, + 268, + 555, + 413 + ], + "spans": [ + { + "bbox": [ + 316, + 268, + 555, + 413 + ], + "type": "image", + "image_path": "24a5d9de226bc27e8074d66ea960e0b8fbefd81096aea0e59ab84594da256ccc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "lines": [ + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "spans": [ + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "type": "text", + "content": "Figure 1. Compared to SOTA UAV trackers on UAVDT, our ORTrack-DeiT sets a new record with " + }, + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "type": "inline_equation", + "content": "83.4\\%" + }, + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "type": "text", + "content": " precision and a speed of 236 FPS. Our ORTrack-D-DeiT strikes a better trade-off with " + }, + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "type": "inline_equation", + "content": "82.5\\%" + }, + { + "bbox": [ + 313, + 422, + 555, + 465 + ], + "type": "text", + "content": " precision and a speed of about 313 FPS." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 494, + 555, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 494, + 555, + 565 + ], + "spans": [ + { + "bbox": [ + 313, + 494, + 555, + 565 + ], + "type": "text", + "content": "efficiency due to UAVs' restricted battery life and computational resources [5, 42, 80, 83]. Consequently, designing an effective UAV tracker requires a delicate balance between precision and efficiency. It needs to ensure accuracy while being conscious of the UAV's energy and computational constraints." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 570, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 556, + 715 + ], + "type": "text", + "content": "In recent years, there has been a notable shift from discriminative correlation filters (DCF)-based methods, because of their unsatisfactory robustness, towards DL-based approaches, particularly with the adoption of single-stream architectures that integrate feature extraction and fusion via pre-trained Vision Transformer (ViT) backbone networks. This single-stream paradigm has proven highly effective in generic visual tracking, as evidenced by the success of recent methods such as OSTrack [91], SimTrack [8], Mixformer [13], and DropMAE [82]. 
Building on these advancements, Aba-VTrack [44] introduces a lightweight DL-based tracker within this framework, employing an adap" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.09228v1 [cs.CV] 12 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 702, + 232, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 702, + 232, + 713 + ], + "spans": [ + { + "bbox": [ + 70, + 702, + 232, + 713 + ], + "type": "text", + "content": "† Equal contribution. * Corresponding authors." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 72, + 294, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 72, + 294, + 226 + ], + "spans": [ + { + "bbox": [ + 57, + 72, + 294, + 226 + ], + "type": "text", + "content": "tive and background-aware token computation method to enhance inference speed, which demonstrates remarkable precision and speed for real-time UAV tracking. However, the use of a variable number of tokens in Aba-VTrack incurs significant time costs, primarily due to the unstructured access operations required during inference. Adding to this, it also grappled with establishing robustness when facing target occlusion, a challenge common in UAV tracking often triggered by obstructive elements like buildings, mountains, trees, and so forth. The problem is exacerbated by the fact that UAVs may not always be capable of circumventing these impediments due to potential large-scale movements involved." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 231, + 294, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 231, + 294, + 638 + ], + "spans": [ + { + "bbox": [ + 57, + 231, + 294, + 638 + ], + "type": "text", + "content": "To address these issues, we introduce a novel framework designed to enhance the occlusion robustness of ViTs for UAV tracking. Our approach, termed ORTrack, aims to learn ViT-based trackers that maintain robust feature representations even in the presence of target occlusion. This is achieved by enforcing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process. The random masking serves as a simulation of target occlusion, which is expected to mimic real occlusion challenges in UAV tracking and aid in learning Occlusion-Robust Representations (ORR). Notably, our method for learning occlusion-robust representation simply uses a Mean Squared Error (MSE) loss during training, adding no extra computational load during inference. Additionally, to enhance efficiency for real-time applications, we introduce an Adaptive Feature-Based Knowledge Distillation (AFKD) method. This method creates a more compact tracker, named ORTrack-D, which adaptively mimics the behavior of the teacher model ORTrack based on the complexity of the tracking task during training. The reasoning is that the teacher model, in its pursuit of powerful representations, may compromise its generalizability. Hence, in situations where generalizability is vital, the student model may perform better, and closely mimicking the teacher's behavior becomes less important. We use the deviation of GIoU loss [67] from its average value to quantify the difficulty of the tracking task, which makes sense as loss value is a commonly used criteria to define hard samples [70, 74, 77]. 
ORTrack-D maintains much of ORTrack's performance with higher efficiency, making it better suited for deployment in resource-constrained environments typical of UAV applications. Extensive experiments on four benchmarks show that our method achieves state-of-the-art performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 57, + 642, + 294, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 642, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 642, + 294, + 712 + ], + "type": "text", + "content": "In summary, our contributions are as follows: (i) We propose to learn Occlusion-Robust Representations (ORR) by imposing an invariance in the feature representation of the target with respect to random masking operations modeled by a spatial Cox process, which can be easily integrated into other tracking frameworks without requiring additional ar" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 317, + 72, + 553, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 553, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 553, + 215 + ], + "type": "text", + "content": "chitectures or increasing inference time; (ii) We propose an Adaptive Feature-Based Knowledge Distillation (AFKD) method to further enhance efficiency, in which the student model adaptively mimics the behavior of the teacher model according to the task's difficulty, resulting in a significant increase in tracking speed while only minimally reducing accuracy; (iii) We introduce ORTrack, a family of efficient trackers based on these components, which integrates seamlessly with other ViT-based trackers. ORTrack demonstrates superior performance while maintaining extremely fast tracking speeds. Extensive evaluations show that ORTrack achieves state-of-the-art real-time performance." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 317, + 228, + 397, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 228, + 397, + 239 + ], + "spans": [ + { + "bbox": [ + 317, + 228, + 397, + 239 + ], + "type": "text", + "content": "2. Related work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 248, + 412, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 248, + 412, + 260 + ], + "spans": [ + { + "bbox": [ + 317, + 248, + 412, + 260 + ], + "type": "text", + "content": "2.1. Visual Tracking." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 267, + 553, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 267, + 553, + 552 + ], + "spans": [ + { + "bbox": [ + 317, + 267, + 553, + 552 + ], + "type": "text", + "content": "In visual tracking, the primary approaches consist of DCF-based and DL-based trackers. DCF-based trackers are favored for UAV tracking due to their remarkable efficiency, but they face difficulties in maintaining robustness under complex conditions [31, 42, 46]. Recently developed lightweight DL-based trackers have improved tracking precision and robustness for UAV tracking [4, 5]; however, their efficiency lags behind that of most DCF-based trackers. Model compression techniques like those in [80, 83] have been used to further boost efficiency, yet these trackers still face issues with tracking precision. Vision Transformers (ViTs) are gaining traction for streamlining and unifying frameworks in visual tracking, as seen in studies like [13, 85, 86, 89, 91]. While these frameworks are compact and efficient, few are based on lightweight ViTs, making them impractical for real-time UAV tracking. To address this, Aba-ViTrack [44] used lightweight ViTs and an adaptive, background-aware token computation method to enhance efficiency for real-time UAV tracking. 
However, the variable token number in this approach necessitates unstructured access operations, leading to significant time costs. In this work, we aim to improve the efficiency of ViTs for UAV tracking through knowledge distillation, a more structured method." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 563, + 533, + 575 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 563, + 533, + 575 + ], + "spans": [ + { + "bbox": [ + 317, + 563, + 533, + 575 + ], + "type": "text", + "content": "2.2. Occlusion-Robust Feature Representation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 582, + 553, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 582, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 317, + 582, + 553, + 712 + ], + "type": "text", + "content": "Occlusion-robust feature representation is crucial in computer vision and image processing. It involves developing methods that can recognize and process objects in images even when parts are hidden or occluded [62, 76]. Early efforts often relied on handcrafted features, active appearance models, motion analysis, sensor fusion, etc [7, 33, 51, 71]. While effective in some cases, these methods struggled with the complexity and variability of real-world visual data. The advent of deep learning revolutionized the field. Many studies have applied Convolutional Neural Networks (CNNs) and other deep architectures to" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 288 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 288 + ], + "type": "text", + "content": "extract occlusion-robust representations [35, 62, 66, 76]. 
These approaches use deep models to capture complex patterns and variations in visual data, making learned features resilient to occlusions and having proven valuable for many computer vision applications, such as action recognition [17, 88], pose estimation [62, 95], and object detection [12, 36]. The exploration of occlusion-robust representations in visual tracking has also demonstrated great success [1, 6, 27, 34, 39, 58, 59, 61, 94]. However, to our knowledge, there is a dearth of research to explore learning occlusion-robust ViTs particularly in a unified framework for UAV tracking. In this study, we delve into the exploration of learning occlusion-robust feature representations based on ViTs by simulating occlusion challenges using random masking modeled by a spatial Cox process, specifically tailored for UAV tracking. This study represents the first use of ViTs for acquiring occlusion-robust feature representations in UAV tracking." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 294, + 188, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 294, + 188, + 306 + ], + "spans": [ + { + "bbox": [ + 55, + 294, + 188, + 306 + ], + "type": "text", + "content": "2.3. Knowledge Distillation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 312, + 295, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 295, + 612 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 295, + 612 + ], + "type": "text", + "content": "Knowledge distillation is a technique used to compress models by transferring knowledge from a complex \"teacher\" model to a simpler \"student\" model, with the aim of maintaining performance while reducing computational resources and memory usage [63, 75]. It involves various types of knowledge, distillation strategies, and teacher-student architectures, typically falling into three categories: response-based, feature-based, and relation-based distillation [26, 63, 78]. 
Widely applied in tasks such as image classification [64], object detection [9], and neural machine translation [42], it offers potential to improve the efficiency and even effectiveness of deep learning models. Recently, it has been successfully utilized to enhance the efficiency of DL-based trackers. For instance, Li et al. [41] used mask-guided self-distillation to compress Siamese-based visual trackers. Sun et al. [72] introduced a lightweight dual Siamese tracker for hyperspectral object tracking, using a spatial-spectral knowledge distillation method to learn from a deep tracker. However, these techniques are mainly Siamese-based and tailored to specific tracking frameworks, posing challenges for adaptation to our ViT-based approach. In this study, we propose a simple yet effective feature-based knowledge distillation method, in which the student adaptively replicate the behavior of the teacher based on the complexity of the tracking task during training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 621, + 111, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 621, + 111, + 633 + ], + "spans": [ + { + "bbox": [ + 55, + 621, + 111, + 633 + ], + "type": "text", + "content": "3. Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 641, + 294, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 294, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 294, + 714 + ], + "type": "text", + "content": "In this section, we first provide a brief overview of our end-to-end tracking framework, named ORTrack, as shown in Figure 2. Then, we introduce the occlusion-robust representation learning based on spatial Cox processes and the method of adaptive knowledge distillation. Finally, we detail the prediction head and training loss." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 72, + 382, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 382, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 382, + 83 + ], + "type": "text", + "content": "3.1. Overview" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "spans": [ + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": "The proposed ORTrack introduces an novel single-stream tracking framework, featuring a spatial Cox process-based masking for occlusion-robust representation learning and an adaptive feature-based knowledge distillation pipeline. ORTrack consists of two sequential training phases: the teacher model training pipeline for learning occlusion-robust representations, followed by the student training pipeline involving adaptive knowledge distillation. In the teacher model training phase, the input includes a target template " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "Z \\in \\mathbb{R}^{3 \\times H_z \\times W_z}" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " of spatial size " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "H_z \\times W_z" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": ", a randomly masked target template " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "Z' = \\mathfrak{m}(Z)" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": ", and a search image " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "X \\in \\mathbb{R}^{3 \\times H_x \\times W_x}" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + 
"content": " of spatial size " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "H_x \\times W_x" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}(\\cdot)" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " represents the random masking operation that masks out non-overlap patches of size " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "b \\times b" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " with a certain masking ratio " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": ". To achieve occlusion-robust representation with ViTs, we minimize the mean squared error (MSE) between two versions of the template representation: one with random masking and one without. During the training of the student model, the teacher's weights remain fixed while both the teacher and student models receive inputs " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\mathfrak{B}_T" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\mathfrak{B}_S" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " represent the backbones of the teacher and student, respectively. In our implementation, " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\mathfrak{B}_T" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "inline_equation", + "content": "\\mathfrak{B}_S" + }, + { + "bbox": [ + 313, + 90, + 555, + 413 + ], + "type": "text", + "content": " share the same structure of the ViT layer but differ in the number of layers. Feature-based knowledge distillation is used to transfer the knowledge embedded in the teacher model's backbone features to the student model through an adaptive distillation loss." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 420, + 553, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 553, + 445 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 553, + 445 + ], + "type": "text", + "content": "3.2. 
Occlusion-Robust Representations (ORR) Based on Spatial Cox Processes" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "spans": [ + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "text", + "content": "To begin, we describe two random masking operations used to simulate occlusion challenges: one from MAE [28] and our proposed method based on a Spatial Cox process, denoted by " + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{U}}" + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{C}}" + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "text", + "content": ", respectively. Although " + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{U}}" + }, + { + "bbox": [ + 313, + 450, + 555, + 640 + ], + "type": "text", + "content": " allows the model to learn robust representations that are less sensitive to noise or missing information by randomly ignoring certain parts of the input data during training [28], it is less effective when used to simulate occlusion since each spatial position (in the sense of block size) is masked out with equal probability, especially in our situation where the target template generally contains background. To ensure that the target is masked out as expected with higher probabilities at a given masking ratio, thereby making the occlusion simulation more effective, we employ a finite Cox process [32] to model this masking operation, which is detailed as follows." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": "Define two associated random matrices " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{m} = (m_{i,j})" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{b} = (b_{i,j})" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "1 \\leqslant i \\leqslant H_z / b" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "1 \\leqslant j \\leqslant W_z / b" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "m_{i,j} \\sim \\mathcal{U}(0,1)" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "m_{i,j}" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " follows a uniform distribution over the interval [0, 1]), " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "b_{i,j} \\in \\{0,1\\}" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " equals 1 if " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "m_{i,j} \\in 
\\mathrm{TopK}(\\mathbf{m}, K)" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", and 0 otherwise. " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\mathrm{TopK}(\\mathbf{m}, K)" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " returns the " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "K = \\lfloor (1 - \\sigma)H_zW_z \\rfloor" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " largest elements from " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\mathbf{m}" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "\\lfloor x \\rfloor" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " rounds " + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 313, + 642, + 556, + 714 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 68, + 509, + 264 + ], + "blocks": [ + { + "bbox": [ + 109, + 68, + 509, + 264 + ], + "lines": [ + { + "bbox": [ + 109, + 68, + 509, + 264 + ], + "spans": [ + { + "bbox": [ + 109, + 68, + 509, + 264 + ], + "type": "image", + "image_path": "4082fd996ec64079d2abc075f06f60d7af465a0f16f8a18689b4c86819470823.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 273, + 555, + 308 + ], + "lines": [ + { + "bbox": [ + 54, + 273, + 555, + 308 + ], + "spans": [ + { + "bbox": [ + 54, + 273, + 555, + 308 + ], + "type": "text", + "content": "Figure 2. 
Overview of the proposed ORTrack framework, which includes separate training pipelines for a teacher and a student model. Note that the spatial Cox process-based masking and occlusion-robust representation learning are applied only in the teacher pipeline. Once the teacher is trained, its weights are fixed for training the student model with the proposed adaptive knowledge distillation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "spans": [ + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": "the nearest integer. Mathematically, " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{U}}(Z) = Z\\odot (\\mathbf{b}\\otimes \\mathbf{1})" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " denotes the Hadamard product and " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\otimes" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " denotes the tensor product, " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathbf{1}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " is an all-ones matrix of size " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "b\\times b" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ". 
Before defining " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{C}}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", we establish core notations relevant to spatial Cox processes. It extends the concept of spatial inhomogeneous Poisson point processes by incorporating a random intensity function, which, in turn, is defined as a Poisson point process with an intensity determined by a location-dependent function in the underlying space. For Euclidean space " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^2" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", an inhomogeneous Poisson point process is defined by a locally integrable positive intensity function " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\lambda \\colon \\mathbb{R}^2\\to [0,\\infty)" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", such that for every bounded region " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " the integral " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\Lambda (\\mathcal{B}) = \\int_{\\mathcal{B}}\\lambda (x,y)\\mathrm{d}xdy" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " is finite, where " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\Lambda (\\mathcal{B})" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " has the interpretation of being the expected number of points of the Poisson process located in " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": 
"inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", and for every collection of disjoint bounded Borel measurable sets " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_1,\\dots,\\mathcal{B}_k" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " [60], its number distributions is defined by " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\operatorname*{Pr}\\{\\mathrm{N}(\\mathcal{B}_i) = n_i,i = 1,\\ldots ,k\\} = \\prod_{i = 1}^{k}\\frac{(\\Lambda(\\mathcal{B}_i))^{n_i}}{n_i!} e^{-\\Lambda (\\mathcal{B}_i)}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "n_i\\in \\mathbb{Z}^{0 + }" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\operatorname*{Pr}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " denotes the probability measure, " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{N}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " indicates the random counting measure such that " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\Lambda (\\mathcal{B}) = \\mathbb{E}[\\mathrm{N}(\\mathcal{B})]" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathbb{E}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " is the expectation operator. 
In particular, the conditional distribution of the points in a bounded set " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " given that " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{N}(\\mathcal{B}) = n\\in \\mathbb{Z}^{0 + }" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " is not uniform, and " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "f_{n}(p_{1},\\dots,p_{n}) = \\prod_{i = 1}^{n}\\frac{\\lambda(p_{i})}{\\Lambda(\\mathcal{B})}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "p_1,\\dots,p_n\\in \\mathcal{B}" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " defines the corresponding location density function of the " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " points. Since a Cox process can be regarded as the result of a two-stage random mechanism for which it is sometimes termed 'doubly stochastic Poisson process' [32], the finite Cox processes can be simulated in a straightforward way based on the hierarchical nature of the model. Specifically, in the first step, the intensity " + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "inline_equation", + "content": "\\lambda (x,y)" + }, + { + "bbox": [ + 56, + 327, + 297, + 704 + ], + "type": "text", + "content": " is generated. 
In the second step, an in" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 313, + 327, + 555, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 555, + 411 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 555, + 411 + ], + "type": "text", + "content": "homogeneous Poisson point process is simulated using the generated " + }, + { + "bbox": [ + 313, + 327, + 555, + 411 + ], + "type": "inline_equation", + "content": "\\lambda(x, y)" + }, + { + "bbox": [ + 313, + 327, + 555, + 411 + ], + "type": "text", + "content": " [32, 53]. The thinning algorithm [11] is used here for simulating inhomogeneous Poisson point processes. It involves simulating a homogeneous Poisson point process with a higher rate than the maximum possible rate of the inhomogeneous process, and then \"thinning\" out the generated points to match the desired intensity function." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "content": "In this work, the randomness of the intensity function is modeled by a random variable " + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "content": " that has a Poisson distribution with expectation of " + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "inline_equation", + "content": "\\varsigma" + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "content": ", namely, " + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "inline_equation", + "content": "\\operatorname{Pr}\\{\\Gamma = k\\} = \\frac{\\varsigma^k e^{-\\varsigma}}{k!}" + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 
313, + 411, + 556, + 473 + ], + "type": "inline_equation", + "content": "k \\in \\mathbb{Z}^{0+}" + }, + { + "bbox": [ + 313, + 411, + 556, + 473 + ], + "type": "text", + "content": ". The intensity function of the inhomogeneous Poisson point process is then given by" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 376, + 477, + 555, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 477, + 555, + 506 + ], + "spans": [ + { + "bbox": [ + 376, + 477, + 555, + 506 + ], + "type": "interline_equation", + "content": "\\lambda (x, y) = \\frac {\\Gamma e ^ {- (x ^ {2} + y ^ {2})}}{\\int_ {\\mathcal {B}} e ^ {- (x ^ {2} + y ^ {2})} d x d y}. \\tag {1}", + "image_path": "37fb7df7758917810ca1c85bac13eb3bdf9ab53cfda5b17d8b280d79f44bda26.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\lambda(x, y)" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " is a bell-shape function that gives more intensities to the central area of " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " denote the rectangle region of size " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "H_z / b \\times W_z / b" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " representing the template region. If we simulate the Cox process within " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " and denote a resulted point pattern by " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ", we can obtain a matrix " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathbf{b}' = (b_{i,j}')_{1 \\leqslant i \\leqslant H_z / b, 1 \\leqslant j \\leqslant W_z / b}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "b_{i,j}'" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " equals 1 if " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "(i, j) \\in \\Xi" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ", and 0 otherwise, with which our " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathbb{C}}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": " can be defined as " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": 
"inline_equation", + "content": "\\mathfrak{m}_{\\mathbb{C}}(Z) = Z \\odot (\\mathbf{b}' \\otimes \\mathbf{1})" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ". It is worthy of note that if " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\varsigma = [(1 - \\sigma)H_zW_z]" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ", since " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathbb{E}[\\Lambda(\\mathcal{B})] = \\mathbb{E}[\\int_{\\mathcal{B}} \\lambda(x, y) dxdy] = \\mathbb{E}[\\Gamma] = \\varsigma" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ", in this case, the expected masking ratio of our masking operation is equal to the masking ratio of " + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathbb{C}}" + }, + { + "bbox": [ + 313, + 510, + 556, + 688 + ], + "type": "text", + "content": ". Thus, in addition to inhomogeneous intensity, our method can simulate more diverse pattern of occlusion due to the introduced randomness of the masking ratio." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "We denote the total number of tokens by " + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "inline_equation", + "content": "\\mathcal{K}" + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": ", the embedding dimension of each token by " + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": ", and all the tokens out" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": "put by the " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": "-th layer of " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathfrak{B}_T" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " with respect to inputs " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": 
"inline_equation", + "content": "\\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)\\in \\mathbb{R}^{\\mathcal{K}\\times d}" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{\\mathcal{K}_Z\\cup \\mathcal{K}_X}^L (Z,X;\\mathfrak{B}_T) = \\mathbf{t}_{1:\\mathcal{K}}^L (Z,X;\\mathfrak{B}_T)" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_Z\\cup \\mathcal{K}_X = [1,\\mathcal{K}]" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{\\mathcal{K}_Z}^L" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{\\mathcal{K}_X}^L" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " represent the tokens corresponding to the template and the search image, respectively. By the same token, the output tokens corresponding to inputs " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z'" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{t}_{1:\\mathcal{K}}^L (Z',X;\\mathfrak{B}_T)" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": ". 
The feature representations of " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z'" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " can be recovered by tracking their token indices in respective ordered sequences, which specifically are " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "t_{1:\\mathcal{K}_z}^L (Z,X;\\mathfrak{B}_T)" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "t_{1:\\mathcal{K}_z}^L (Z',X;\\mathfrak{B}_T)" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": ", respectively. The core idea of our occlusion-robust representations learning is that the mean square error between the feature representation of " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " and that of " + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "inline_equation", + "content": "Z'" + }, + { + "bbox": [ + 55, + 72, + 296, + 242 + ], + "type": "text", + "content": " is minimized, which is implemented by minimizing the following MSE loss," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 80, + 248, + 295, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 248, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 80, + 248, + 295, + 262 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {o r r} = \\left\\| t _ {1: \\mathcal {K} _ {z}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K} _ {z}} ^ {L} 
\\left(Z ^ {\\prime}, X; \\mathfrak {B} _ {T}\\right) \\right\\| ^ {2}. \\tag {2}", + "image_path": "c82e91d033841d1ddd340a9f4b0e674e188bc0ed043c7dd185a55c438f935319.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 269, + 296, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 269, + 296, + 329 + ], + "spans": [ + { + "bbox": [ + 55, + 269, + 296, + 329 + ], + "type": "text", + "content": "During inference, only " + }, + { + "bbox": [ + 55, + 269, + 296, + 329 + ], + "type": "inline_equation", + "content": "[Z,X]" + }, + { + "bbox": [ + 55, + 269, + 296, + 329 + ], + "type": "text", + "content": " is input to the model without the need for random template masking. Consequently, our method incurs no additional computational cost during inference. Notably, our method is independent of the ViTs used, any efficient ViTs can work in our framework." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 336, + 296, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 336, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 336, + 296, + 361 + ], + "type": "text", + "content": "3.3. Adaptive Feature-Based Knowledge Distillation (AFKD)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 366, + 296, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 366, + 296, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 366, + 296, + 617 + ], + "type": "text", + "content": "Feature-based knowledge distillation is a technique in machine learning that trains a smaller student model to mimic a larger teacher model, which, instead of focusing only on final outputs, transfers intermediate features or representations from the teacher to the student [26, 78]. This method uses the detailed internal representations from the teacher model to improve the student's learning process. 
However, there is a risk that the student model might overfit to the specific features of the teacher model, rather than generalizing well to new data. This can be particularly problematic if the teacher model has learned spurious correlations in the data. To combat this, we propose adaptively transferring knowledge based on the difficulty of the tracking task. We quantify this difficulty using the deviation of the GIoU loss [67] (see Section 3.4) from its average value, calculated between the student's prediction and the ground truth. Adapting knowledge transfer based on difficulty ensures that the student model doesn't heavily adjust its weights on easy tasks, which it can handle already probably due to its generalizability. Instead, it focuses more on challenging scenarios where its feature representation is less effective." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 618, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 296, + 713 + ], + "type": "text", + "content": "Additionally, the choice of teacher-student architectures is crucial in knowledge distillation. Given the wide array of possible student models, we adopt a self-similar approach where the student model mirrors the teacher's architecture but employs a smaller ViT backbone, using fewer ViT blocks. This strategy simplifies the design and eliminates the need for additional alignment techniques that would otherwise be necessary due to mismatched feature di" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": "mensions. Lastly, layer selection and the metric of feature similarity are also crucial aspects of feature-based knowledge distillation. 
Given MSE's popularity in feature-based knowledge distillation and to avoid potential complexity associated with using multiple layers, we employ MSE to penalize differences between the output feature representations of both the teacher and student model's backbones, i.e., " + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "inline_equation", + "content": "t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{T})" + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "inline_equation", + "content": "t_{1:\\mathcal{K}}^{L}(Z,X;\\mathfrak{B}_{S})" + }, + { + "bbox": [ + 313, + 72, + 555, + 180 + ], + "type": "text", + "content": ". The proposed adaptive knowledge distillation loss is defined by" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 186, + 569, + 210 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 186, + 569, + 210 + ], + "spans": [ + { + "bbox": [ + 313, + 186, + 569, + 210 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {a f k d} = (\\alpha + \\beta \\left(\\mathcal {L} _ {i o u} - \\overline {{\\mathcal {L} _ {i o u}}}\\right)) \\| t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {T}) - t _ {1: \\mathcal {K}} ^ {L} (Z, X; \\mathfrak {B} _ {S}) \\| ^ {2}, \\tag {3}", + "image_path": "2b133809dc2006deefd4113c9e26f835295f27716544a7f45f59a3fb96f5f56b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\alpha + \\beta (\\mathcal{L}_{iou} - \\overline{\\mathcal{L}_{iou}}) \\coloneqq \\varpi (\\mathcal{L}_{iou}; \\alpha, \\beta)" + }, + { + "bbox": [ + 313, + 210, + 
555, + 258 + ], + "type": "text", + "content": " is a function of the deviation of GIoU loss from its average, with slope " + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "text", + "content": " and intercept " + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 313, + 210, + 555, + 258 + ], + "type": "text", + "content": ", used to quantify the difficulty of the tracking task." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 266, + 501, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 266, + 501, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 266, + 501, + 278 + ], + "type": "text", + "content": "3.4. Prediction Head and Training Loss" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "spans": [ + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": "Following the corner detection head in [13, 91], we use a prediction head consisting of multiple Conv-BN-ReLU layers to directly estimate the bounding box of the target. The output tokens corresponding to the search image are first reinterpreted to a 2D spatial feature map and then fed into the prediction head. 
The head outputs a local offset " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{o} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": ", a normalized bounding box size " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{s} \\in [0,1]^{2 \\times H_x / P \\times W_x / P}" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": ", and a target classification score " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in [0,1]^{H_x / P \\times W_x / P}" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": " as prediction outcomes. The initial estimation of the target position depends on identifying the location with the highest classification score, i.e., " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "(x_c, y_c) = \\operatorname{argmax}_{(x,y)} \\mathbf{p}(x,y)" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": ". The final target bounding box is estimated by " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "\\{(x_t, y_t); (w,h)\\} = \\{(x_c, y_c) + \\mathbf{o}(x_c, y_c); \\mathbf{s}(x_c, y_c)\\}" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": ". For the tracking task, we adopt the weighted focal loss [40] for classification, a combination of " + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "inline_equation", + "content": "L_1" + }, + { + "bbox": [ + 313, + 284, + 556, + 488 + ], + "type": "text", + "content": " loss and GIoU loss [67] for bounding box regression. 
The total loss for tracking prediction is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 362, + 495, + 553, + 509 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 495, + 553, + 509 + ], + "spans": [ + { + "bbox": [ + 362, + 495, + 553, + 509 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p r e d} = \\mathcal {L} _ {c l s} + \\lambda_ {i o u} \\mathcal {L} _ {i o u} + \\lambda_ {L _ {1}} \\mathcal {L} _ {L _ {1}}, \\tag {4}", + "image_path": "8b76efc3ec6e4fe4cc0bf2f96a3188a5932a30f62326b9f8de3ca91405f2d0d4.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": "where the constants " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "\\lambda_{iou} = 2" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "\\lambda_{L_1} = 5" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": " are set as in [13, 91]. 
The overall loss " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_T = \\mathcal{L}_{pred} + \\gamma \\mathcal{L}_{orr}" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": " is used to train the teacher end-to-end after loading the pretrained weights of the ViT trained with ImageNet [68], where the constant " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": " is set to " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "2.0 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": ". After this training, we fix the weights of the teacher model, and employ the overall loss " + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_S = \\mathcal{L}_{pred} + \\mathcal{L}_{afkd}" + }, + { + "bbox": [ + 313, + 515, + 556, + 612 + ], + "type": "text", + "content": ", for end-to-end knowledge distillation training." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 621, + 395, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 621, + 395, + 635 + ], + "spans": [ + { + "bbox": [ + 313, + 621, + 395, + 635 + ], + "type": "text", + "content": "4. Experiments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 641, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 641, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 641, + 555, + 715 + ], + "type": "text", + "content": "We evaluate our method on four UAV tracking benchmarks: DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. All experiments run on a PC with an i9-10850K processor, 16GB RAM, and an NVIDIA TitanX GPU. 
We compare our method against 26 state-of-the-art trackers, using their official codes and hyper" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 78, + 113, + 533, + 258 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 555, + 103 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 555, + 103 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 555, + 103 + ], + "type": "text", + "content": "Table 1. Precision (Prec.), success rate (Succ.), and speed (FPS) comparison between ORTrack and lightweight trackers on four UAV tracking benchmarks, i.e., DTB70 [45], UAVDT [19], VisDrone2018 [98], and UAV123 [57]. Red, blue and green indicate the first, second and third place. Note that the percent symbol (\\%) is omitted for all Prec. and Succ. values." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 78, + 113, + 533, + 258 + ], + "lines": [ + { + "bbox": [ + 78, + 113, + 533, + 258 + ], + "spans": [ + { + "bbox": [ + 78, + 113, + 533, + 258 + ], + "type": "table", + "html": "
MethodSourceDTB70UAVDTVisDrone2018UAV123Avg.Avg.FPSFLOPs (GMac)Param. (M)
Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.Prec.Succ.GPUCPU
DCF-basedKCF [29]TAPMI 1546.828.057.129.068.541.352.333.156.232.9-624.3--
fDSST [16]TPAMI 1753.435.766.638.369.851.058.340.562.041.4-193.4--
ECO_HC [14]CVPR 1763.544.869.441.680.858.171.049.671.248.5-83.5--
AutoTrack [46]CVPR 2071.647.871.845.078.857.368.947.272.849.3-57.8--
RACF [42]PR 2272.650.577.349.483.460.070.247.775.951.8-35.6--
CNN-basedHiFT [4]ICCV 2180.259.465.247.571.952.678.759.074.054.6160.3-7.29.9
TCTrack [5]CVPR 2281.262.272.553.079.959.480.060.578.458.8149.6-8.89.7
SGDViT [90]ICRA 2378.560.465.748.072.152.175.457.572.954.5110.5-11.323.3
DRCI [93]ICME 2381.461.884.059.083.460.076.759.781.460.1281.362.73.68.8
PRL-Track [22]IROSS 2479.560.673.153.572.653.879.159.376.156.8132.3-7.412.0
VIT-basedAba-ViTrack [44]ICCV 2385.966.483.459.986.165.386.466.485.564.5181.550.32.48.0
SMAT [25]WACV 2481.963.880.858.782.563.481.864.681.862.6126.8-3.28.6
AVTrack-DeiT [47]ICML 2484.365.082.158.786.065.384.866.884.263.8260.359.80.97-1.93.5-7.9
ORTrack-DeiTOurs86.266.483.460.188.666.884.366.485.665.0226.455.42.47.9
ORTrack-D-DeiT83.765.182.559.784.663.984.066.183.763.7292.364.71.55.3
", + "image_path": "e108445084f2db53ba04ebd300c9ae730732181ffe90090330cd8f3bc9c758c5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 278, + 295, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 295, + 326 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 295, + 326 + ], + "type": "text", + "content": "parameters. We evaluate our approach against 13 state-of-the-art (SOTA) lightweight trackers (see Table 1) and 14 SOTA deep trackers designed specifically for generic visual tracking (refer to Table 2)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 333, + 188, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 333, + 188, + 346 + ], + "spans": [ + { + "bbox": [ + 55, + 333, + 188, + 346 + ], + "type": "text", + "content": "4.1. Implementation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "spans": [ + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "content": "We adopt different ViTs as backbones, including ViT-tiny [18], Eva-tiny [21], and DeiT-tiny [73], to build three trackers for evaluation: ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT. The head of ORTrack consists of a stack of four Conv-BN-ReLU layers. The search region and template sizes are set to " + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "content": ", respectively. 
A combination of training sets from GOT-10k [30], LaSOT [20], COCO [48], and TrackingNet [56] is used for the training. The batch size is set to 32. We employ the AdamW optimizer [50], with a weight decay of " + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "content": " and an initial learning rate of " + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "inline_equation", + "content": "4 \\times 10^{-5}" + }, + { + "bbox": [ + 54, + 352, + 295, + 519 + ], + "type": "text", + "content": ". The training is conducted over 300 epochs, with 60,000 image pairs processed in each epoch. The learning rate is reduced by a factor of 10 after 240 epochs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 527, + 211, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 527, + 211, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 527, + 211, + 540 + ], + "type": "text", + "content": "4.2. State-of-the-art Comparison" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": "Comparison with Lightweight Trackers. The overall performance of our ORTrack in comparison to 13 competing trackers on the four benchmarks is displayed in Table 1. As can be seen, our trackers demonstrate superior performance among all these trackers in terms of average (Avg.) precision (Prec.), success rate (Succ.) and speeds. On average, RACF [42] demonstrated the highest Prec. " + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "(75.9\\%)" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": " and Succ. 
" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "(51.8\\%)" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": " among DCF-based trackers, DRCI [93] achieves the highest precision and success rates, with " + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "81.4\\%" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "60.1\\%" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": ", respectively, among CNN-based trackers. However, the average Prec. and Succ. of all our trackers are greater than " + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "82.0\\%" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "inline_equation", + "content": "62.0\\%" + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": ", respectively, clearly surpassing DCF- and CNN- based approaches. Additionally, our ORTrack-DeiT achieves the highest Avg. Prec. and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": "Avg. Succ. 
of " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "85.6\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "65.0\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": ", respectively, among all competing trackers. Although Aba-ViT track achieves performance close to our ORTrack-DeiT, its GPU speed is significantly lower, with a " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "23.6\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " relative gap. Notably, when the proposed adaptive knowledge distillation is applied to ORTrack-DeiT, the resulting student model, ORTrack-D-DeiT, shows a significant speed increase: " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "29.1\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " on GPU and " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "16.8\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " on CPU. This improvement is accompanied by a minimal reduction in accuracy, with only a " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " decrease in Avg. Prec. and a " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " decrease in Avg. Succ.. 
All proposed trackers can run in real-time on a single " + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "inline_equation", + "content": "\\mathbf{CPU}^*" + }, + { + "bbox": [ + 313, + 278, + 555, + 540 + ], + "type": "text", + "content": " and our ORTrack-DeiT sets a new performance record for real-time UAV tracking. We also compare the floating point operations per second (FLOPs) and number of parameters (Params.) of our method with CNN-based and ViT-based trackers in Table 1. Our method demonstrates a relatively lower parameter count and reduced computational complexity compared to these approaches. Notably, since AVTrack-DeiT tracker features adaptive architectures, the FLOPs and parameters range from minimum to maximum values. These results highlight our method's effectiveness and its state-of-the-art performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 541, + 556, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 541, + 556, + 661 + ], + "spans": [ + { + "bbox": [ + 313, + 541, + 556, + 661 + ], + "type": "text", + "content": "Comparison with Deep Trackers. The proposed ORTrack-DeiT is also compared with 14 SOTA deep trackers in Table 2, which shows precision (Prec.) and GPU speed on VisDrone2018. Our ORTrack-DeiT surpasses all other methods in both metrics, demonstrating its superior accuracy and speed. Although trackers like AQATrack [87], HIPTrack [2], and ROMTrack [3] achieve precision comparable to our ORTrack-DeiT, their GPU speeds are much slower. Specifically, our method is 4, 6, and 4 times faster than AQATrack, HIPTrack, and ROMTrack, respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 661, + 555, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 661, + 555, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 661, + 555, + 685 + ], + "type": "text", + "content": "Attribute-Based Evaluation. 
To access our method's robustness against target occlusion, we compare ORTrack-" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 313, + 693, + 555, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 693, + 555, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 693, + 555, + 712 + ], + "type": "text", + "content": "*Real-time performance applies to platforms similar to or more advanced than ours." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 91, + 541, + 148 + ], + "blocks": [ + { + "bbox": [ + 73, + 71, + 536, + 83 + ], + "lines": [ + { + "bbox": [ + 73, + 71, + 536, + 83 + ], + "spans": [ + { + "bbox": [ + 73, + 71, + 536, + 83 + ], + "type": "text", + "content": "Table 2. Precision (Prec.) and speed (FPS) comparison between ORTrack-DeiT and deep-based trackers on VisDrone2018 [98]." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 91, + 541, + 148 + ], + "lines": [ + { + "bbox": [ + 70, + 91, + 541, + 148 + ], + "spans": [ + { + "bbox": [ + 70, + 91, + 541, + 148 + ], + "type": "table", + "html": "
TrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPSTrackerSourcePrec.Succ.FPS
ORTrack-DeiTOurs88.666.8206.2ZoomTrack [38]NIPS 2381.463.461.7SimTrack [8]ECCV 2280.060.969.7
AQATrack [87]CVPR 2487.266.953.4SeqTrack [10]CVPR 2385.365.815.3ToMP [55]CVPR 2284.164.421.4
HIPTrack [2]CVPR 2486.767.131.3MAT [96]CVPR 2381.662.268.4KeepTrack [54]ICCV 2184.063.520.3
EVPTrack [69]AAAI 2484.565.822.1SparseTT [23]IJCAI 2281.462.130.2SAOT [97]ICCV 2176.959.135.4
ROMTrack [3]ICCV 2386.466.751.1OSTrack [91]ECCV 2284.264.862.7PrDiMP50 [15]CVPR 2079.459.742.6
", + "image_path": "30f4f63d5479c9e7c39cc7978d66b2f065cec62132404c0b0f5b0ec73dd8fa3a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 57, + 165, + 293, + 300 + ], + "blocks": [ + { + "bbox": [ + 57, + 165, + 293, + 300 + ], + "lines": [ + { + "bbox": [ + 57, + 165, + 293, + 300 + ], + "spans": [ + { + "bbox": [ + 57, + 165, + 293, + 300 + ], + "type": "image", + "image_path": "ed103c4c98bb6d874794172cd813dd855455cca2395fd3ffe3039b18f26b538f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 308, + 295, + 343 + ], + "lines": [ + { + "bbox": [ + 55, + 308, + 295, + 343 + ], + "spans": [ + { + "bbox": [ + 55, + 308, + 295, + 343 + ], + "type": "text", + "content": "Figure 3. Attribute-based comparison on the partial occlusion subset of VisDrone2018 [98]. ORTrack-DeiT* refers to ORTrack-DeiT without applying the occlusion-robust enhancement." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "spans": [ + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "text", + "content": "DeiT alongside 16 SOTA trackers on the partial occlusion subset of VisDrone2018. Additionally, we also assess the baseline ORTrack-DeiT*, i.e., ORTrack-DeiT without applying the proposed method for learning Occlusion-Robust Representation (ORR), for comparison. The precision plot are presented in Fig. 3, with additional attribute-based evaluation results provided in the supplemental materials. 
As observed, ORTrack-DeiT achieves the second-highest precision " + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "inline_equation", + "content": "(85.0\\%)" + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "text", + "content": ", just slightly behind the first-ranked tracker AQATrack by " + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "inline_equation", + "content": "0.2\\%" + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "text", + "content": ". Remarkably, incorporating the proposed components leads to a significant improvement over ORTrack-DeiT*, with increases of " + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "inline_equation", + "content": "6.9\\%" + }, + { + "bbox": [ + 54, + 364, + 295, + 520 + ], + "type": "text", + "content": " in Prec., well underscoring the effectiveness of our method." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 529, + 149, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 529, + 149, + 542 + ], + "spans": [ + { + "bbox": [ + 55, + 529, + 149, + 542 + ], + "type": "text", + "content": "4.3. Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 71, + 579, + 281, + 677 + ], + "blocks": [ + { + "bbox": [ + 66, + 559, + 284, + 570 + ], + "lines": [ + { + "bbox": [ + 66, + 559, + 284, + 570 + ], + "spans": [ + { + "bbox": [ + 66, + 559, + 284, + 570 + ], + "type": "text", + "content": "Table 3. Effect of ORR and AFKD on the baseline trackers." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 579, + 281, + 677 + ], + "lines": [ + { + "bbox": [ + 71, + 579, + 281, + 677 + ], + "spans": [ + { + "bbox": [ + 71, + 579, + 281, + 677 + ], + "type": "table", + "html": "
TrackerORRAFKDUAVDTFPS
Prec.Succ.
ORTrack-ViT77.055.6216.2
80.3↑3.358.2↑2.6-
79.1↑2.157.5↑1.9290.3↑34%
ORTrack-Eva78.156.6238.3
80.8↑2.758.7↑2.1-
79.5↑1.457.8↑1.2308.8↑30%
ORTrack-DeiT78.656.7218.4
83.4↑4.860.1↑3.4-
82.5↑3.959.7↑3.0298.7↑36%
", + "image_path": "778d6455e7c357455f6bc0b24a76e246ad665f3f59a3cf71913b262d79b1b256.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "content": "Effect of Occlusion-Robust Representations (ORR) and Adaptive Feature-Based Knowledge Distillation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": "(AFKD). To demonstrate the effectiveness of the proposed ORR and AFKD, Table 3 shows the evaluation results on UAVDT dataset as these components are gradually integrated into the baselines. To avoid potential variations due to randomness, we only present the speed of the baseline, since the GPU speeds of the baseline and its ORR-enhanced version are theoretically identical. As can be seen, the incorporation of ORR significantly enhances both Prec. and Succ. for all baseline trackers. Specifically, the Prec. increases for ORTrack-ViT, ORTrack-Eva, and ORTrack-DeiT are " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "3.3\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "2.7\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "4.8\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", respectively, while the Succ. 
increases are " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "2.6\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "2.1\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "3.1\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", respectively. These significant enhancements highlight the effectiveness of ORR in improving tracking precision. The further integration of AFKD results in consistent improvements in GPU speeds, with only slight reductions in Prec. and Succ. Specifically, all baseline trackers experience GPU speed enhancements of over " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "30.0\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": ", with ORTrack-DeiT showing an impressive " + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "inline_equation", + "content": "36.0\\%" + }, + { + "bbox": [ + 313, + 167, + 555, + 419 + ], + "type": "text", + "content": " improvement. These results affirm the effectiveness of AFKD in optimizing tracking efficiency while maintaining high tracking performance." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 449, + 552, + 522 + ], + "blocks": [ + { + "bbox": [ + 318, + 429, + 549, + 441 + ], + "lines": [ + { + "bbox": [ + 318, + 429, + 549, + 441 + ], + "spans": [ + { + "bbox": [ + 318, + 429, + 549, + 441 + ], + "type": "text", + "content": "Table 4. Impact of various Masking Operators on performance." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 449, + 552, + 522 + ], + "lines": [ + { + "bbox": [ + 318, + 449, + 552, + 522 + ], + "spans": [ + { + "bbox": [ + 318, + 449, + 552, + 522 + ], + "type": "table", + "html": "
MethodmUmCSAM[37]AdAutoMix[65]CutMix[92]VisDrone2018
Prec.Succ.
ORTrack-DeiT81.662.2
86.765.4
88.666.8
86.865.6
84.363.8
85.764.2
", + "image_path": "a32930026e85095da5c264c34b784b8587c0f7197f937d9831316c7c501c4002.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": "Effect of Masking Operators. To demonstrate the superiority of the proposed masking operator in terms of performance, we evaluate ORTrack-DeiT with various implementations of masking operators (i.e., " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{U}}" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{C}}" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": ", and SAM [37]) alongside data mixing augmentation methods (i.e., AdAutoMix [65] and CutMix [92]). The evaluation results on VisDrone2018 are presented in Table 4. As shown, although using SAM, AdAutoMix, and CutMix improves performance, the best result achieved with SAM is only comparable to the performance of our " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{U}}" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": " masking operator. 
When " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathfrak{m}_{\\mathrm{C}}" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": " is applied, the improvements are even more substantial, with increases of " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "7.0\\%" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "inline_equation", + "content": "4.6\\%" + }, + { + "bbox": [ + 313, + 534, + 555, + 713 + ], + "type": "text", + "content": ", respectively. These results validate the effectiveness of the proposed ORR component and particularly demonstrate the superiority of the masking operator based on spatial Cox processes." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 102, + 282, + 152 + ], + "blocks": [ + { + "bbox": [ + 55, + 70, + 297, + 95 + ], + "lines": [ + { + "bbox": [ + 55, + 70, + 297, + 95 + ], + "spans": [ + { + "bbox": [ + 55, + 70, + 297, + 95 + ], + "type": "text", + "content": "Table 5. Impact of the adaptive knowledge distillation loss on the generalizability on LaSOT and TrackingNet." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 102, + 282, + 152 + ], + "lines": [ + { + "bbox": [ + 71, + 102, + 282, + 152 + ], + "spans": [ + { + "bbox": [ + 71, + 102, + 282, + 152 + ], + "type": "table", + "html": "
MethodKDAFKDLaSOTTrackingNet
AUCPnormPAUCPnormP
ORTrack-DeiT53.760.852.672.877.867.1
54.061.253.273.178.467.4
54.662.654.373.779.168.2
", + "image_path": "0797ddf346306b391113d1ad5a815add8a24529d2d51fe9d06dc617dc39d1caf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "spans": [ + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": "Impact of the Adaptive Knowledge Distillation Loss. To assess the impact of the adaptive knowledge distillation loss on generalizability, we train ORTrack-DeiT using GOT-10K with " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "\\varpi (\\mathcal{L}_{iou};\\alpha ,\\beta)" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "\\varpi (\\mathcal{L}_{iou};\\alpha ,0)" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " separately, then evaluate them on LaSOT and TrackingNet. The results are shown in Table 5. Note that " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "\\varpi (\\mathcal{L}_{iou};\\alpha ,0)" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " degenerates to a non-adaptive knowledge distillation loss as it becomes a constant. As can be seen, AFKD demonstrates greater performance improvements than KD. 
For instance, using AFKD results in additional gains of over " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "1.1\\%" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "P_{norm}" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 55, + 169, + 296, + 314 + ], + "type": "text", + "content": " on LaSOT, demonstrating its superior generalizability." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 69, + 354, + 284, + 428 + ], + "blocks": [ + { + "bbox": [ + 55, + 322, + 296, + 346 + ], + "lines": [ + { + "bbox": [ + 55, + 322, + 296, + 346 + ], + "spans": [ + { + "bbox": [ + 55, + 322, + 296, + 346 + ], + "type": "text", + "content": "Table 6. Application of our ORR component to three SOTA trackers: ARTrack [81], GRM [24], and DropTrack[82]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 354, + 284, + 428 + ], + "lines": [ + { + "bbox": [ + 69, + 354, + 284, + 428 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 284, + 428 + ], + "type": "table", + "html": "
TrackerORRUAVDTVisDrone2018
Prec.Succ.Prec.Succ.
ARTrack[81]77.154.677.759.5
78.5↑1.455.8↑1.279.5↑1.860.8↑1.3
GRM[24]79.057.782.763.4
81.7↑1.759.3↑1.684.8↑2.164.6↑1.2
DropTrack[82]76.955.981.562.7
78.7↑1.857.4↑1.582.8↑1.364.2↑1.5
", + "image_path": "3b4f89479c5ff8243e6be20cd787d0178a5864faf1227a509ce683510651c3ac.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 438, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 438, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 54, + 438, + 295, + 605 + ], + "type": "text", + "content": "Application to SOTA trackers. To show the wide applicability of our proposed method, we incorporate the proposed ORR into three existing SOTA trackers: ARTrack [81], GRM [24], and DropTrack [82]. Please note that we replace the model's original backbones with ViT-tiny [18] to reduce training time. As shown in Table 6, incorporating ORR results in significant improvements in both precision and success rates for the three baseline trackers. Specifically, ARTrack, GRM, and DropTrack demonstrate an improvement of more than " + }, + { + "bbox": [ + 54, + 438, + 295, + 605 + ], + "type": "inline_equation", + "content": "1.2\\%" + }, + { + "bbox": [ + 54, + 438, + 295, + 605 + ], + "type": "text", + "content": " in both precision and success rate across two datasets. These experimental results demonstrate that the proposed ORR component can be seamlessly integrated into existing tracking frameworks, significantly improving tracking accuracy." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 605, + 295, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 605, + 295, + 701 + ], + "spans": [ + { + "bbox": [ + 54, + 605, + 295, + 701 + ], + "type": "text", + "content": "Qualitative Results. Several qualitative tracking results of ORTrack-DeiT and seven SOTA UAV trackers are shown in Fig. 4. As can be seen, only our tracker successfully tracks the targets in all challenging examples, where pose variations, background clusters, and scale variations are presented. 
In these cases, our method performs significantly better and is more visually appealing, bolstering the effectiveness of the proposed method for UAV tracking." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 701, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 701, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 701, + 295, + 714 + ], + "type": "text", + "content": "Figure 5 shows attention and feature maps produced by" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 317, + 71, + 553, + 222 + ], + "blocks": [ + { + "bbox": [ + 317, + 71, + 553, + 222 + ], + "lines": [ + { + "bbox": [ + 317, + 71, + 553, + 222 + ], + "spans": [ + { + "bbox": [ + 317, + 71, + 553, + 222 + ], + "type": "image", + "image_path": "305cd2d33c6dc044b94a239788e02caf27d1d84283759f722ef53e9fae001cb9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 227, + 555, + 262 + ], + "lines": [ + { + "bbox": [ + 313, + 227, + 555, + 262 + ], + "spans": [ + { + "bbox": [ + 313, + 227, + 555, + 262 + ], + "type": "text", + "content": "Figure 4. Qualitative evaluation on 3 video sequences from, respectively, UAV123 [57], UAVDT [19], and VisDrone2018 [98] (i.e., person9, S1607, and uav0000180_00050_s)." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 315, + 271, + 553, + 360 + ], + "blocks": [ + { + "bbox": [ + 315, + 271, + 553, + 360 + ], + "lines": [ + { + "bbox": [ + 315, + 271, + 553, + 360 + ], + "spans": [ + { + "bbox": [ + 315, + 271, + 553, + 360 + ], + "type": "image", + "image_path": "016234796faa3fcbf9a729c4310a83731bee82db9c21aaee76b7a33bb734fac4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "lines": [ + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "text", + "content": "Figure 5. Visualize the attention map (left) and feature map (right) of the target images. The first row displays the search and masked images with masking ratios of " + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "inline_equation", + "content": "0\\%" + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 313, + 367, + 555, + 425 + ], + "type": "text", + "content": ". The second and third rows show the attention and feature maps generated by ORTrack-DeiT, with and without ORR, respectively." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 429, + 556, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 429, + 556, + 491 + ], + "spans": [ + { + "bbox": [ + 313, + 429, + 556, + 491 + ], + "type": "text", + "content": "ORTrack-DeiT, with and without occlusion-robust enhancement. We observe that ORTrack-DeiT with ORR maintains a clearer focus on the targets and exhibits more consistent feature maps across masking ratios. These results support the effectiveness of our ORR component." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 499, + 388, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 499, + 388, + 512 + ], + "spans": [ + { + "bbox": [ + 313, + 499, + 388, + 512 + ], + "type": "text", + "content": "5. Conclusion" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 520, + 555, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 520, + 555, + 662 + ], + "spans": [ + { + "bbox": [ + 312, + 520, + 555, + 662 + ], + "type": "text", + "content": "In view of the common challenges posed by target occlusion in UAV tracking, in this work, we proposed to learn Occlusion-Robust Representation (ORR) by imposing an invariance of feature representation of the target with respect to random masking modeled by a spatial Cox process. Moreover, we propose an Adaptive Feature-Based Knowledge Distillation (AFKD) to enhance efficiency. Our approach is notably straightforward and can be easily integrated into other tracking frameworks. Extensive experiments across multiple UAV tracking benchmarks validate the effectiveness of our method, demonstrating that our ORTrack-DeiT achieves SOTA performance." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 663, + 556, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 663, + 556, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 663, + 556, + 712 + ], + "type": "text", + "content": "Acknowledgments. This work was funded by the Guangxi Natural Science Foundation (Grant No. 2024GXNSFAA010484), and the National Natural Science Foundation of China (Nos. 62466013, 62206123)." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 62, + 91, + 296, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 91, + 296, + 150 + ], + "spans": [ + { + "bbox": [ + 62, + 91, + 296, + 150 + ], + "type": "text", + "content": "[1] Wesam A. Askar, Osama Elmowafy, Anca L. Ralescu, Aliaa Abdel-Halim Youssif, and Gamal A. Elnashar. Occlusion detection and processing using optical flow and particle filter. Int. J. Adv. Intell. Paradigms, 15:63-76, 2020. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 152, + 296, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 152, + 296, + 189 + ], + "spans": [ + { + "bbox": [ + 61, + 152, + 296, + 189 + ], + "type": "text", + "content": "[2] Wenrui Cai, Qingjie Liu, and Yunhong Wang. Hiptrack: Visual tracking with historical prompts. In CVPR, pages 19258-19267, 2024. 
6, 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 190, + 295, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 190, + 295, + 227 + ], + "spans": [ + { + "bbox": [ + 62, + 190, + 295, + 227 + ], + "type": "text", + "content": "[3] Yidong Cai, Jie Liu, Jie Tang, and Gangshan Wu. Robust object modeling for visual tracking. In ICCV, pages 9589-9600, 2023. 6, 7" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 228, + 295, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 228, + 295, + 275 + ], + "spans": [ + { + "bbox": [ + 62, + 228, + 295, + 275 + ], + "type": "text", + "content": "[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. Hift: Hierarchical feature transformer for aerial tracking. In ICCV, pages 15457-15466, 2021. 1, 2, 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 278, + 295, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 278, + 295, + 326 + ], + "spans": [ + { + "bbox": [ + 62, + 278, + 295, + 326 + ], + "type": "text", + "content": "[5] Ziang Cao, Ziyuan Huang, Liang Pan, Shiwei Zhang, Ziwei Liu, and Changhong Fu. Ttrack: Temporal contexts for aerial tracking. In CVPR, pages 14798-14808, 2022. 1, 2, 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 327, + 295, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 327, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 62, + 327, + 295, + 363 + ], + "type": "text", + "content": "[6] Satyaki Chakraborty and Martial Hebert. Learning to track object position through occlusion. ArXiv, abs/2106.10766, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 365, + 295, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 365, + 295, + 401 + ], + "spans": [ + { + "bbox": [ + 62, + 365, + 295, + 401 + ], + "type": "text", + "content": "[7] T-H Chang and Shaogang Gong. 
Tracking multiple people with a multi-camera system. In Womot, pages 19-26, 2001. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 403, + 295, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 403, + 295, + 450 + ], + "spans": [ + { + "bbox": [ + 62, + 403, + 295, + 450 + ], + "type": "text", + "content": "[8] Boyu Chen, Peixia Li, Lei Bai, Lei Qiao, and et al. Backbone is all your need: a simplified architecture for visual object tracking. In ECCV, pages 375-392, 2022. 1, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 453, + 295, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 453, + 295, + 488 + ], + "spans": [ + { + "bbox": [ + 62, + 453, + 295, + 488 + ], + "type": "text", + "content": "[9] Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and et al. Learning efficient object detection models with knowledge distillation. NIPS, 30, 2017. 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 491, + 295, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 491, + 295, + 537 + ], + "spans": [ + { + "bbox": [ + 57, + 491, + 295, + 537 + ], + "type": "text", + "content": "[10] Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, and Han Hu. Seqtrack: Sequence to sequence learning for visual object tracking. In CVPR, pages 14572-14581, 2023. 7" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 540, + 295, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 540, + 295, + 575 + ], + "spans": [ + { + "bbox": [ + 57, + 540, + 295, + 575 + ], + "type": "text", + "content": "[11] Yuanda Chen. Thinning algorithms for simulating point processes. Florida State University, Tallahassee, FL, 2016. 
4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 578, + 295, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 578, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 57, + 578, + 295, + 625 + ], + "type": "text", + "content": "[12] Cheng Chi, Shifeng Zhang, Junliang Xing, Zhen Lei, S. Li, and Xudong Zou. Pedhunter: Occlusion robust pedestrian detector in crowded scenes. ArXiv, abs/1909.06826, 2019. 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 628, + 295, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 628, + 295, + 664 + ], + "spans": [ + { + "bbox": [ + 57, + 628, + 295, + 664 + ], + "type": "text", + "content": "[13] Yutao Cui, Cheng Jiang, and et al. Mixformer: End-to-end tracking with iterative mixed attention. In CVPR, pages 13608-13618, 2022. 1, 2, 5" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 666, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 666, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 666, + 295, + 712 + ], + "type": "text", + "content": "[14] Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, and Michael Felsberg. Eco: Efficient convolution operators for tracking. In CVPR, pages 6638-6646, 2017. 6" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 714 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "type": "text", + "content": "[15] Martin Danelljan, Luc Van Gool, and Radu Timofte. Probabilistic regression for visual tracking. In CVPR, pages 7181-7190, 2020. 
7" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 110, + 553, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 110, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 316, + 110, + 553, + 144 + ], + "type": "text", + "content": "[16] Martin Danelljan, Gustav Hager, Fahad Shahbaz Khan, and et al. Discriminative scale space tracking. IEEE TPAMI, 39(8):1561-1575, 2017. 6" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 145, + 553, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 145, + 553, + 193 + ], + "spans": [ + { + "bbox": [ + 317, + 145, + 553, + 193 + ], + "type": "text", + "content": "[17] Soumen Das, Saroj K. Biswas, and Biswajit Purkayastha. Occlusion robust sign language recognition system for indian sign language using cnn and pose features. Multimed. Tools. Appl, 2024. 3" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 194, + 553, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 194, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 317, + 194, + 553, + 240 + ], + "type": "text", + "content": "[18] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, and et al. An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv, abs/2010.11929, 2020. 6, 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 243, + 553, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 553, + 277 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 553, + 277 + ], + "type": "text", + "content": "[19] Dawei Du, Yuankai Qi, Hongyang Yu, and et al. The unmanned aerial vehicle benchmark: Object detection and tracking. In ECCV, pages 375-391, 2018. 
5, 6, 8" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 278, + 553, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 278, + 553, + 314 + ], + "spans": [ + { + "bbox": [ + 316, + 278, + 553, + 314 + ], + "type": "text", + "content": "[20] Heng Fan, Liting Lin, Fan Yang, and et al. Lasot: A high-quality benchmark for large-scale single object tracking. In CVPR, pages 5369-5378, 2018. 6" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 316, + 553, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 316, + 553, + 350 + ], + "spans": [ + { + "bbox": [ + 316, + 316, + 553, + 350 + ], + "type": "text", + "content": "[21] Yuxin Fang, Quan Sun, Xinggang Wang, and et al. Eva-02: A visual representation for neon genesis. Image and Vision Computing, 149:105171, 2024. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 351, + 553, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 351, + 553, + 386 + ], + "spans": [ + { + "bbox": [ + 316, + 351, + 553, + 386 + ], + "type": "text", + "content": "[22] Changhong Fu, Xiang Lei, and et al. Progressive representation learning for real-time UAV tracking. In IROS, pages 5072-5079, 2024. 6" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 388, + 553, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 553, + 422 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 553, + 422 + ], + "type": "text", + "content": "[23] Zhihong Fu, Zehua Fu, Qingjie Liu, Wenrui Cai, and Yunhong Wang. Sparsett: Visual tracking with sparse transformers. arXiv e-prints, 2022. 
7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 424, + 553, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 424, + 553, + 459 + ], + "spans": [ + { + "bbox": [ + 316, + 424, + 553, + 459 + ], + "type": "text", + "content": "[24] Shenyuan Gao, Chunluan Zhou, and Jun Zhang. Generalized relation modeling for transformer tracking. In CVPR, pages 18686-18695, 2023. 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 460, + 553, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 460, + 553, + 495 + ], + "spans": [ + { + "bbox": [ + 316, + 460, + 553, + 495 + ], + "type": "text", + "content": "[25] Goutam Yelluru Gopal and Maria A Amer. Separable self and mixed attention transformers for efficient object tracking. In WACV, pages 6708-6717, 2024. 6" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 497, + 553, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 497, + 553, + 531 + ], + "spans": [ + { + "bbox": [ + 316, + 497, + 553, + 531 + ], + "type": "text", + "content": "[26] Jianping Gou, Baosheng Yu, Stephen J Maybank, and Dacheng Tao. Knowledge distillation: A survey. IJCV, 129(6):1789-1819, 2021. 3, 5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 533, + 553, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 533, + 553, + 567 + ], + "spans": [ + { + "bbox": [ + 316, + 533, + 553, + 567 + ], + "type": "text", + "content": "[27] Karthik Hariharakrishnan and Dan Schonfeld. Fast object tracking using adaptive block matching. IEEE TMM, 7:853-859, 2005. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 569, + 553, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 569, + 553, + 604 + ], + "spans": [ + { + "bbox": [ + 316, + 569, + 553, + 604 + ], + "type": "text", + "content": "[28] Kaiming He, Xinlei Chen, Saining Xie, and et al. 
Masked autoencoders are scalable vision learners. In CVPR, pages 15979-15988, 2021. 3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 605, + 553, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 605, + 553, + 639 + ], + "spans": [ + { + "bbox": [ + 316, + 605, + 553, + 639 + ], + "type": "text", + "content": "[29] João F. Henriques, Rui Caseiro, Pedro Martins, and et al. High-speed tracking with kernelized correlation filters. IEEE TPAMI, 37:583-596, 2015. 6" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 642, + 553, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 642, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 316, + 642, + 553, + 677 + ], + "type": "text", + "content": "[30] L. Huang, X. Zhao, and K. Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE TPAMI, (5), 2021. 6" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 678, + 553, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 678, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 316, + 678, + 553, + 714 + ], + "type": "text", + "content": "[31] Ziyuan Huang, Changhong Fu, and et al. Learning aberrance repressed correlation filters for real-time uav tracking. In ICCV, pages 2891-2900, 2019. 2" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 713 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 56, + 72, + 297, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 297, + 108 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 297, + 108 + ], + "type": "text", + "content": "[32] Janine Illian, Antti Penttinen, Helga Stoyan, and Dieterich Stoyan. 
Statistical analysis and modelling of spatial point patterns. John Wiley & Sons, 2008. 3, 4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 110, + 295, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 110, + 295, + 144 + ], + "spans": [ + { + "bbox": [ + 56, + 110, + 295, + 144 + ], + "type": "text", + "content": "[33] Michal Irani and Shmuel Peleg. Motion analysis for image enhancement: Resolution, occlusion, and transparency. JVCIR, 4(4):324-335, 1993. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 147, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 147, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 147, + 294, + 194 + ], + "type": "text", + "content": "[34] Dippal Israni and Hiren K. Mewada. Feature descriptor based identity retention and tracking of players under intense occlusion in soccer videos. Int. J. Intell. Eng. Syst, 2018. 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 196, + 294, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 196, + 294, + 231 + ], + "spans": [ + { + "bbox": [ + 56, + 196, + 294, + 231 + ], + "type": "text", + "content": "[35] Minyang Jiang and et al. Occlusion-robust fau recognition by mining latent space of masked autoencoders. Neurocomputing, 569:127107, 2024. 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 233, + 294, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 233, + 294, + 267 + ], + "spans": [ + { + "bbox": [ + 56, + 233, + 294, + 267 + ], + "type": "text", + "content": "[36] Jung Uk Kim, Ju Won Kwon, and et al. BBC net: Bounding-box critic network for occlusion-robust object detection. IEEE TCSVT, 30:1037-1050, 2020. 
3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "spans": [ + { + "bbox": [ + 56, + 270, + 294, + 293 + ], + "type": "text", + "content": "[37] Alexander Kirillov, Eric Mintun, and et al. Segment anything. In ICCV, pages 4015-4026, 2023. 7" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 295, + 294, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 295, + 294, + 330 + ], + "spans": [ + { + "bbox": [ + 56, + 295, + 294, + 330 + ], + "type": "text", + "content": "[38] Yutong Kou, Jin Gao, Bing Li, and et al. Zoomtrack: Target-aware non-uniform resizing for efficient visual tracking. NIPS, 36:50959-50977, 2023. 7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 332, + 294, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 332, + 294, + 367 + ], + "spans": [ + { + "bbox": [ + 56, + 332, + 294, + 367 + ], + "type": "text", + "content": "[39] Thijs P. Kuipers, Devanshu Arya, and Deepak K. Gupta. Hard occlusions in visual object tracking. In ECCV Workshops, 2020. 3" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 369, + 294, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 369, + 294, + 392 + ], + "spans": [ + { + "bbox": [ + 56, + 369, + 294, + 392 + ], + "type": "text", + "content": "[40] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. IJCV, 128:642-656, 2018. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 395, + 294, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 395, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 56, + 395, + 294, + 430 + ], + "type": "text", + "content": "[41] Luming Li, Chenglizhao Chen, and Xiaowei Zhang. Mask-guided self-distillation for visual tracking. In ICME, pages 1-6, 2022. 
3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 432, + 294, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 432, + 294, + 479 + ], + "spans": [ + { + "bbox": [ + 56, + 432, + 294, + 479 + ], + "type": "text", + "content": "[42] Shuiwang Li, Yuting Liu, Qijun Zhao, and Ziliang Feng. Learning residue-aware correlation filters and refining scale for real-time uav tracking. Pattern Recognition, 127:108614, 2022. 1, 2, 3, 6" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 480, + 294, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 480, + 294, + 515 + ], + "spans": [ + { + "bbox": [ + 56, + 480, + 294, + 515 + ], + "type": "text", + "content": "[43] Shuiwang Li, Xiangyang Yang, and et al. Learning target-aware vision transformers for real-time uav tracking. IEEE TGRS, 2024. 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 517, + 294, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 517, + 294, + 564 + ], + "spans": [ + { + "bbox": [ + 56, + 517, + 294, + 564 + ], + "type": "text", + "content": "[44] Shuiwang Li, Yangxiang Yang, Dan Zeng, and Xucheng Wang. Adaptive and background-aware vision transformer for real-time uav tracking. In ICCV, pages 13943-13954, 2023. 1, 2, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 567, + 294, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 567, + 294, + 601 + ], + "spans": [ + { + "bbox": [ + 56, + 567, + 294, + 601 + ], + "type": "text", + "content": "[45] Siyi Li and D. Y. Yeung. Visual object tracking for unmanned aerial vehicles: A benchmark and new motion models. In AAAI, 2017. 
5, 6" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 604, + 294, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 604, + 294, + 651 + ], + "spans": [ + { + "bbox": [ + 56, + 604, + 294, + 651 + ], + "type": "text", + "content": "[46] Yiming Li, Changhong Fu, Fangqiang Ding, and et al. Autotrack: Towards high-performance visual tracking for uav with automatic spatio-temporal regularization. In CVPR, pages 11920-11929, 2020. 1, 2, 6" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 653, + 294, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 294, + 688 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 294, + 688 + ], + "type": "text", + "content": "[47] Yongxin Li, Mengyuan Liu, You Wu, and et al. Learning adaptive and view-invariant vision transformer for real-time uav tracking. In ICML, 2024. 6" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 689, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 689, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 689, + 294, + 713 + ], + "type": "text", + "content": "[48] Tsung Yi Lin, Michael Maire, and et al. Microsoft coco: Common objects in context. In ECCV, 2014. 6" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 72, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 315, + 72, + 553, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 72, + 553, + 108 + ], + "spans": [ + { + "bbox": [ + 315, + 72, + 553, + 108 + ], + "type": "text", + "content": "[49] Mengyuan Liu, Yuelong Wang, and et al. Global filter pruning with self-attention for real-time uav tracking. In BMVC, page 861, 2022. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 110, + 553, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 110, + 553, + 144 + ], + "spans": [ + { + "bbox": [ + 315, + 110, + 553, + 144 + ], + "type": "text", + "content": "[50] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 147, + 553, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 147, + 553, + 180 + ], + "spans": [ + { + "bbox": [ + 315, + 147, + 553, + 180 + ], + "type": "text", + "content": "[51] David G Lowe. Object recognition from local scale-invariant features. In ICCV, pages 1150-1157, 1999. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 183, + 553, + 219 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 183, + 553, + 219 + ], + "spans": [ + { + "bbox": [ + 315, + 183, + 553, + 219 + ], + "type": "text", + "content": "[52] Siyu Ma, Yuting Liu, and et al. Learning disentangled representation in pruning for real-time uav tracking. In ACML, pages 690-705, 2023. 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 220, + 553, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 220, + 553, + 255 + ], + "spans": [ + { + "bbox": [ + 315, + 220, + 553, + 255 + ], + "type": "text", + "content": "[53] Torsten Mattfeldt. Stochastic geometry and its applications. Journal of Microscopy, 183:257-257, 1996. 4" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 258, + 553, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 258, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 315, + 258, + 553, + 293 + ], + "type": "text", + "content": "[54] Christoph Mayer, Martin Danelljan, and et al. Learning target candidate association to keep track of what not to track. 
In ICCV, pages 13424-13434, 2021. 7" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 294, + 553, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 294, + 553, + 329 + ], + "spans": [ + { + "bbox": [ + 315, + 294, + 553, + 329 + ], + "type": "text", + "content": "[55] Christoph Mayer, Martin Danelljan, and et al. Transforming model prediction for tracking. In CVPR, pages 8721-8730, 2022. 7" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 331, + 553, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 331, + 553, + 366 + ], + "spans": [ + { + "bbox": [ + 315, + 331, + 553, + 366 + ], + "type": "text", + "content": "[56] Matthias Mueller, Adel Bibi, and et al. Trackingnet: A large-scale dataset and benchmark for object tracking in the wild. In ECCV, pages 300-317, 2018. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 369, + 553, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 369, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 315, + 369, + 553, + 403 + ], + "type": "text", + "content": "[57] Matthias Mueller, Neil G. Smith, and Bernard Ghanem. A benchmark and simulator for uav tracking. In ECCV, 2016. 5, 6, 8" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 405, + 553, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 405, + 553, + 440 + ], + "spans": [ + { + "bbox": [ + 315, + 405, + 553, + 440 + ], + "type": "text", + "content": "[58] Hieu Tat Nguyen and Arnold W. M. Smeulders. Fast occluded object tracking by a robust appearance filter. IEEE TPAMI, 26:1099-1104, 2004. 
3" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 442, + 553, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 442, + 553, + 477 + ], + "spans": [ + { + "bbox": [ + 315, + 442, + 553, + 477 + ], + "type": "text", + "content": "[59] Hieu Tat Nguyen, Marcel Worring, and Rein van den Boomgaard. Occlusion robust adaptive template tracking. In ICCV, pages 678-683, 2001. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 315, + 479, + 550, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 479, + 550, + 491 + ], + "spans": [ + { + "bbox": [ + 315, + 479, + 550, + 491 + ], + "type": "text", + "content": "[60] Toby C. O’Neil. Geometric measure theory. 2002. 4" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 493, + 553, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 493, + 553, + 515 + ], + "spans": [ + { + "bbox": [ + 315, + 493, + 553, + 515 + ], + "type": "text", + "content": "[61] Jiyan Pan and Bo Hu. Robust occlusion handling in object tracking. In CVPR, pages 1-8, 2007. 3" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 517, + 553, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 517, + 553, + 552 + ], + "spans": [ + { + "bbox": [ + 315, + 517, + 553, + 552 + ], + "type": "text", + "content": "[62] Joo Hyun Park, Yeong Min Oh, and et al. Handoccnet: Occlusion-robust 3d hand mesh estimation network. In CVPR, pages 1486–1495, 2022. 2, 3" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 554, + 553, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 554, + 553, + 578 + ], + "spans": [ + { + "bbox": [ + 315, + 554, + 553, + 578 + ], + "type": "text", + "content": "[63] Wonpyo Park and et al. Relational knowledge distillation. In CVPR, pages 3962-3971, 2019. 
3" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 579, + 553, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 579, + 553, + 614 + ], + "spans": [ + { + "bbox": [ + 315, + 579, + 553, + 614 + ], + "type": "text", + "content": "[64] Zhimao Peng, Zechao Li, Junge Zhang, and et al. Few-shot image recognition with knowledge transfer. In ICCV, pages 441-449, 2019. 3" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 616, + 553, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 616, + 553, + 651 + ], + "spans": [ + { + "bbox": [ + 315, + 616, + 553, + 651 + ], + "type": "text", + "content": "[65] Huafeng Qin, Xin Jin, Yun Jiang, Mounim A El-Yacoubi, and Xinbo Gao. Adversarial automixup. arXiv preprint arXiv:2312.11954, 2023. 7" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 653, + 553, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 653, + 553, + 688 + ], + "spans": [ + { + "bbox": [ + 315, + 653, + 553, + 688 + ], + "type": "text", + "content": "[66] Delin Qu, Yizhen Lao, and et al. Towards nonlinear-motion-aware and occlusion-robust rolling shutter correction. ICCV, pages 10646–10654, 2023. 3" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 689, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 689, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 553, + 713 + ], + "type": "text", + "content": "[67] Seyed Hamid Rezatofighi, Nathan Tsoi, JunYoung Gwak, and et al. 
Generalized intersection over union:" + } + ] + } + ], + "index": 36 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 712 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 78, + 72, + 294, + 96 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 72, + 294, + 96 + ], + "spans": [ + { + "bbox": [ + 78, + 72, + 294, + 96 + ], + "type": "text", + "content": "A metric and a loss for bounding box regression. \nCVPR, pages 658-666, 2019. 2, 5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 99, + 295, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 99, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 56, + 99, + 295, + 133 + ], + "type": "text", + "content": "[68] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, and et al. Imagenet large scale visual recognition challenge. IJCV, 115:211 - 252, 2014. 5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 137, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 137, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 137, + 294, + 172 + ], + "type": "text", + "content": "[69] Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, and Xianxian Li. Explicit visual prompts for visual object tracking. In AAAI, 2024. 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 175, + 294, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 175, + 294, + 221 + ], + "spans": [ + { + "bbox": [ + 56, + 175, + 294, + 221 + ], + "type": "text", + "content": "[70] Abhinav Shrivastava, Abhinav Kumar Gupta, and Ross B. Girshick. Training region-based object detectors with online hard example mining. In CVPR, pages 761-769, 2016. 
2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 224, + 294, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 224, + 294, + 259 + ], + "spans": [ + { + "bbox": [ + 56, + 224, + 294, + 259 + ], + "type": "text", + "content": "[71] Markus Storer and et al. Active appearance model fitting under occlusion using fast-robust pca. In VISAPP, pages 129–136, 2009. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 262, + 294, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 262, + 294, + 309 + ], + "spans": [ + { + "bbox": [ + 56, + 262, + 294, + 309 + ], + "type": "text", + "content": "[72] Chen Sun and et al. Siamohot: A lightweight dual siamese network for onboard hyperspectral object tracking via joint spatial-spectral knowledge distillation. IEEE TGRS, 61:1-12, 2023. 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 312, + 294, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 312, + 294, + 348 + ], + "spans": [ + { + "bbox": [ + 56, + 312, + 294, + 348 + ], + "type": "text", + "content": "[73] Hugo Touvron and et al. Training data-efficient image transformers & distillation through attention. In ICML, pages 10347-10357, 2021. 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 350, + 294, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 350, + 294, + 386 + ], + "spans": [ + { + "bbox": [ + 56, + 350, + 294, + 386 + ], + "type": "text", + "content": "[74] Wenxuan Tu, Sihang Zhou, and et al. Hierarchically contrastive hard sample mining for graph self-supervised pretraining. IEEE TNNLS, PP, 2023. 
2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 56, + 388, + 294, + 422 + ], + "type": "text", + "content": "[75] Frederick Tung and Greg Mori. Similarity-preserving knowledge distillation. In ICCV, pages 1365-1374, 2019. 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 426, + 294, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 294, + 460 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 294, + 460 + ], + "type": "text", + "content": "[76] K. Wang and et al. Region attention networks for pose and occlusion robust facial expression recognition. IEEE TIP, 29:4057-4069, 2019. 2, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 464, + 294, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 464, + 294, + 499 + ], + "spans": [ + { + "bbox": [ + 56, + 464, + 294, + 499 + ], + "type": "text", + "content": "[77] Keze Wang and et al. Towards human-machine cooperation: Self-supervised sample mining for object detection. In CVPR, pages 1605-1613, 2018. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 502, + 294, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 502, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 56, + 502, + 294, + 548 + ], + "type": "text", + "content": "[78] Lin Wang and Kuk-Jin Yoon. Knowledge distillation and student-teacher learning for visual intelligence: A review and new outlooks. IEEE TPAMI, 44:3048-3068, 2020. 3, 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 552, + 294, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 552, + 294, + 599 + ], + "spans": [ + { + "bbox": [ + 56, + 552, + 294, + 599 + ], + "type": "text", + "content": "[79] Xucheng Wang, Xiangyang Yang, and et al. 
Learning disentangled representation with mutual information maximization for real-time uav tracking. In ICME, pages 1331-1336, 2023. 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 602, + 294, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 602, + 294, + 637 + ], + "spans": [ + { + "bbox": [ + 56, + 602, + 294, + 637 + ], + "type": "text", + "content": "[80] Xucheng Wang, Dan Zeng, Qijun Zhao, and Shuiwang Li. Rank-based filter pruning for real-time uav tracking. In ICME, pages 01-06, 2022. 1, 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 640, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 640, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 56, + 640, + 294, + 663 + ], + "type": "text", + "content": "[81] Xing Wei, Yifan Bai, and et al. Autoregressive visual tracking. In CVPR, pages 9697-9706, 2023. 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 666, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 666, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 294, + 712 + ], + "type": "text", + "content": "[82] Qiangqiang Wu, Tianyu Yang, and et al. Dropmae: Masked autoencoders with spatial-attention dropout for tracking tasks. In CVPR, pages 14561-14571, 2023. 1, 8" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 107 + ], + "type": "text", + "content": "[83] Wanying Wu, Pengzhi Zhong, and Shuiwang Li. Fisher pruning for real-time uav tracking. In IJCNN, pages 1-7, 2022. 
1, 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 110, + 553, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 110, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 316, + 110, + 553, + 156 + ], + "type": "text", + "content": "[84] You Wu, Xucheng Wang, Dan Zeng, and et al. Learning motion blur robust vision transformers with dynamic early exit for real-time uav tracking. arXiv preprint arXiv:2407.05383, 2024.1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 158, + 553, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 158, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 316, + 158, + 553, + 192 + ], + "type": "text", + "content": "[85] Fei Xie, Chunyu Wang, and et al. Learning tracking representations via dual-branch fully transformer networks. In ICCV, pages 2688–2697, 2021. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 194, + 553, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 194, + 553, + 228 + ], + "spans": [ + { + "bbox": [ + 316, + 194, + 553, + 228 + ], + "type": "text", + "content": "[86] Fei Xie, Chunyu Wang, Guangting Wang, and et al. Correlation-aware deep tracking. In CVPR, pages 8741-8750, 2022. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 230, + 553, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 230, + 553, + 265 + ], + "spans": [ + { + "bbox": [ + 316, + 230, + 553, + 265 + ], + "type": "text", + "content": "[87] Jinxia Xie and et al. Autoregressive queries for adaptive tracking with spatio-temporal transformers. In CVPR, pages 19300-19309, 2024. 
6, 7" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 266, + 553, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 266, + 553, + 301 + ], + "spans": [ + { + "bbox": [ + 316, + 266, + 553, + 301 + ], + "type": "text", + "content": "[88] Di Yang and et al. Self-supervised video pose representation learning for occlusion- robust action recognition. In AFGR, pages 1-5, 2021. 3" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 304, + 553, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 304, + 553, + 338 + ], + "spans": [ + { + "bbox": [ + 316, + 304, + 553, + 338 + ], + "type": "text", + "content": "[89] Xiangyang Yang, Dan Zeng, and et al. Adaptively bypassing vision transformer blocks for efficient visual tracking. Pattern Recognition, 161:111278, 2025. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 339, + 553, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 339, + 553, + 374 + ], + "spans": [ + { + "bbox": [ + 316, + 339, + 553, + 374 + ], + "type": "text", + "content": "[90] Liangliang Yao, Changhong Fu, and et al. Sgdvit: Saliency-guided dynamic vision transformer for uav tracking. arXiv preprint arXiv:2303.04378, 2023. 6" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 376, + 553, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 376, + 553, + 411 + ], + "spans": [ + { + "bbox": [ + 316, + 376, + 553, + 411 + ], + "type": "text", + "content": "[91] Botao Ye, Hong Chang, and et al. Joint feature learning and relation modeling for tracking: A one-stream framework. In ECCV, pages 341-357, 2022. 
1, 2, 5, 7" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 412, + 553, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 412, + 553, + 457 + ], + "spans": [ + { + "bbox": [ + 316, + 412, + 553, + 457 + ], + "type": "text", + "content": "[92] Sangdoo Yun, Dongyoon Han, and et al. Cutmix: Regularization strategy to train strong classifiers with localizable features. In ICCV, pages 6023-6032, 2019. 7" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 460, + 553, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 460, + 553, + 507 + ], + "spans": [ + { + "bbox": [ + 316, + 460, + 553, + 507 + ], + "type": "text", + "content": "[93] Dan Zeng, Mingliang Zou, Xucheng Wang, and Shuiwang Li. Towards discriminative representations with contrastive instances for real-time uav tracking. In ICME, pages 1349-1354, 2023. 6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 509, + 553, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 509, + 553, + 544 + ], + "spans": [ + { + "bbox": [ + 316, + 509, + 553, + 544 + ], + "type": "text", + "content": "[94] Chenyuan Zhang, Jiu Xu, and et al. A klt-based approach for occlusion handling in human tracking. In PCS, pages 337-340, 2012. 3" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 545, + 553, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 545, + 553, + 580 + ], + "spans": [ + { + "bbox": [ + 316, + 545, + 553, + 580 + ], + "type": "text", + "content": "[95] Yi Zhang, Pengliang Ji, and et al. 3d-aware neural body fitting for occlusion robust 3d human pose estimation. ICCV, pages 9365-9376, 2023. 
3" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 582, + 553, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 582, + 553, + 627 + ], + "spans": [ + { + "bbox": [ + 316, + 582, + 553, + 627 + ], + "type": "text", + "content": "[96] Haojie Zhao, Dong Wang, and Huchuan Lu. Representation learning for visual object tracking by masked appearance transfer. In CVPR, pages 18696-18705, 2023. 7" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 629, + 553, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 629, + 553, + 664 + ], + "spans": [ + { + "bbox": [ + 316, + 629, + 553, + 664 + ], + "type": "text", + "content": "[97]Zikun Zhou, Wenjie Pei, Xin Li, and et al. Saliencyassociated object tracking. In ICCV, pages 9846- 9855,2021.7" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 666, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 666, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 666, + 553, + 712 + ], + "type": "text", + "content": "[98] Pengfei Zhu, Longyin Wen, and et al. Visdrone-vdt2018: The vision meets drone video detection and tracking challenge results. In ECCV Workshops, 2018. 
5, 6, 7, 8" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_content_list.json b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3d171d303b3adba2a11fcd942fcaa5dfd6479c69 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_content_list.json @@ -0,0 +1,4628 @@ +[ + { + "type": "text", + "text": "Type-Constrained Code Generation with Language Models", + "text_level": 1, + "bbox": [ + 88, + 113, + 884, + 138 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "NIELS MündLER*, ETH Zurich, Switzerland", + "bbox": [ + 86, + 150, + 467, + 166 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "JINGXUAN HE*, UC Berkeley, USA", + "bbox": [ + 88, + 169, + 386, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "HAO WANG,UC Berkeley,USA", + "bbox": [ + 90, + 188, + 358, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KOUSHIK SEN, UC Berkeley, USA", + "bbox": [ + 90, + 205, + 380, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "DAWN SONG, UC Berkeley, USA", + "bbox": [ + 90, + 223, + 372, + 238 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MARTIN VECHEV, ETH Zurich, Switzerland", + "bbox": [ + 90, + 241, + 462, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have achieved notable success in code generation. However, they still frequently produce uncompilable output because their next-token inference procedure does not model formal aspects of code. 
Although constrained decoding is a promising approach to alleviate this issue, it has only been applied to handle either domain-specific languages or syntactic features of general-purpose programming languages. However, LLMs frequently generate code with typing errors, which are beyond the domain of syntax and generally hard to adequately constrain. To address this challenge, we introduce a type-constrained decoding approach that leverages type systems to guide code generation. For this purpose, we develop novel prefix automata and a search over inhabitable types, forming a sound approach to enforce well-typedness on LLM-generated code. We formalize our approach on a foundational simply-typed language and extend it to TypeScript to demonstrate practicality. Our evaluation on the HumanEval and MBPP datasets shows that our approach reduces compilation errors by more than half and significantly increases functional correctness in code synthesis, translation, and repair tasks across LLMs of various sizes and model families, including state-of-the-art open-weight models with more than 30B parameters. 
The results demonstrate the generality and effectiveness of our approach in constraining LLM code generation with formal rules of type systems.", + "bbox": [ + 86, + 266, + 907, + 479 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts: • Theory of computation → Formal languages and automata theory; • Software and its engineering → General programming languages; • Computing methodologies → Machine learning.", + "bbox": [ + 86, + 486, + 907, + 516 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Additional Key Words and Phrases: Code Generation, Language Model, Type System, Program Synthesis, Program Translation, Program Repair, Constrained Decoding", + "bbox": [ + 86, + 522, + 907, + 555 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 90, + 566, + 244, + 581 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) are remarkably successful in diverse fields [12, 27, 49] and increasingly used in everyday coding tasks [25, 68]. They show promising capabilities at synthesizing code from natural language descriptions [37, 59], translating between programming languages [59], and repairing incorrect programs [44, 74]. Despite these achievements, LLM-generated code often contains compilation errors, logic flaws, or security vulnerabilities [20, 53, 55]. These issues arise because LLMs generate code by iteratively sampling the next token from a vocabulary of tokens – a probabilistic process that does not provide any formal guarantees.", + "bbox": [ + 86, + 587, + 909, + 702 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A promising technique to address this limitation is constrained decoding, which enforces the formal rules of programming languages during LLMs' code generation process, rejecting invalid tokens and ensuring only valid tokens are considered as generation candidates. 
Previous studies have shown that constrained decoding improves adherence to program syntax [8, 41, 57, 66].", + "bbox": [ + 86, + 704, + 907, + 770 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Both authors co-lead this project.", + "bbox": [ + 88, + 779, + 323, + 794 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Authors' Contact Information: Niels Mündler, niels.muendler@inf.ethz.ch, ETH Zurich, Switzerland; Jingxuan He, jingxuan. he@berkeley.edu, UC Berkeley, USA; Hao Wang, hwang628@berkeley.edu, UC Berkeley, USA; Koushik Sen, ksen@berkeley. edu, UC Berkeley, USA; Dawn Song, dawnsong@berkeley.edu, UC Berkeley, USA; Martin Vechev, martin.vechev@inf.ethz.ch, ETH Zurich, Switzerland.", + "bbox": [ + 86, + 804, + 907, + 859 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 90, + 870, + 195, + 897 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09246v2 [cs.LG] 8 May 2025", + "bbox": [ + 30, + 188, + 76, + 670 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.", + "bbox": [ + 88, + 900, + 753, + 913 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, these improvements are limited, as syntax accounts for only a small part of overall program correctness. For instance, in our evaluation of state-of-the-art open-weight LLMs (§5), syntactic errors make up on average $6\\%$ of all compilation errors in generated TypeScript code.", + "bbox": [ + 86, + 118, + 909, + 168 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Key Challenge: Generating Well-Typed Code. Beyond program syntax, type systems detect and reject bugs at compile time [40, 43] and are therefore enforced in many popular programming languages [4, 10, 19]. 
We observe that LLMs struggle to generate well-typed code [20, 29, 63], as typing rules significantly complicate the generation of valid code [62]. In our evaluation of LLMs (§5), on average $94\\%$ of compilation errors result from failing type checks. This suggests a promising direction: guiding LLMs' code generation process by incorporating the formal rules of type systems.", + "bbox": [ + 86, + 179, + 909, + 279 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, implementing this approach is challenging because type systems can in general not be captured by context-free grammars [43], prohibiting the application of prior constrained decoding methods developed for program syntax [8, 66]. Furthermore, besides deriving and maintaining a type environment for completed expressions during generation (similar to classic type systems), we need to accurately assess and handle partial expressions. Specifically, for each currently generated partial expression, we must decide whether the partial expression can be completed to match a required type. Determining this would allow us to constrain the LLM to provably generate well-typed expressions upon termination, but involves solving the challenging problem of type inhabitation [30, 67] in the novel context of LLM-based code generation.", + "bbox": [ + 86, + 279, + 909, + 430 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This Work: Type-Constrained Decoding. In this work, we introduce type-constrained decoding1, addressing the challenge of generating well-typed code using LLMs. We develop a sound algorithm to determine if a partial program can be completed into a well-typed program. This algorithm is based on a novel non-deterministic automaton we construct. The automaton incrementally builds abstract syntax trees described by the partial program and annotates them with type-relevant context, e.g., declared identifiers and expression types. 
It leverages such information to maintain a prefix property, ensuring that parsing a program prefix only results in a non-empty set of states when it can be completed into a well-typed program. To guarantee the prefix property, we design a sound type search algorithm that determines whether a partial expression can inhabit a given type. We construct our automaton for a generic, simply-typed Turing-complete calculus [10].", + "bbox": [ + 86, + 440, + 909, + 606 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To demonstrate its practical effectiveness, we instantiate our approach on a non-trivial subset of TypeScript. We choose TypeScript for three key reasons: (i) it is currently one of the most actively used languages, e.g., in open-source projects on GitHub [26, 38]; (ii) as we show, state-of-the-art LLMs fail to reliably generate well-typed TypeScript code; (iii) its core type system is simple enough [10] to be suitable for developing the first prototype of our approach. We perform a comprehensive evaluation on TypeScript versions of the widely-used HumanEval and MBPP benchmarks [5, 13, 14], focusing on three common coding tasks: synthesis, translation, and repair. Our experimental results show that type-constrained decoding significantly enhances code generation for LLMs of various sizes (2B-34B parameters). For synthesis and translation, it reduces compilation errors by more than half and increases functional correctness relatively by $3.5\\%$ to $5.5\\%$ . Additionally, it enhances functionally correct repair of non-compiling code relatively by $37\\%$ on average. 
We further investigate our approach in depth through a runtime analyses and case studies.", + "bbox": [ + 86, + 606, + 909, + 806 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We highlight that our type constraining approach is broadly applicable to any language derivable from the core calculus, any code generation task in these languages, and any LLM utilizing next-token generation. In §6, we envision how our approach can benefit other production-ready languages and closed-weight LLMs.", + "bbox": [ + 86, + 806, + 909, + 873 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "171:2", + "bbox": [ + 90, + 83, + 129, + 95 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 95 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1Our code implementation is publicly available at https://github.com/eth-sri/type-constrained-code-generation.", + "bbox": [ + 88, + 898, + 843, + 913 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Main Contributions. Our main contributions can be summarized as follows:", + "bbox": [ + 88, + 118, + 761, + 134 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A prefix automaton and a type search algorithm to enable type constraining for LLM-based code generation, demonstrated on a generic, simply-typed core calculus (§3).", + "- An instantiation and extension of our approach to the popular TypeScript language (§4).", + "- An extensive evaluation across various LLMs and coding tasks, showing the significant benefit of our approach in reducing compilation errors and increasing functional correctness (§5)." 
+ ], + "bbox": [ + 106, + 138, + 905, + 220 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Background and Overview", + "text_level": 1, + "bbox": [ + 88, + 233, + 378, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we first provide relevant background on LLM-based code generation and constrained decoding. Then, we motivate our type constraining approach using an illustrative example and present a high-level overview of its construction.", + "bbox": [ + 86, + 252, + 907, + 302 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Background on LLM-based Code Generation and Constrained Decoding", + "text_level": 1, + "bbox": [ + 86, + 316, + 814, + 333 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM-based Code Generation. LLMs generate code incrementally by sampling one token at a time in an iterative manner, as depicted in Algorithm 1 (without the blue highlights). A user prompt $x$ specifies a code generation task for a trained LLM. At Line 1, the output program $s$ is initialized to an empty string or a program prefix provided in $x$ , e.g., a function signature. At the beginning of each generation iteration (Line 3), the LLM takes as input a concatenation $x \\circ s$ of the prompt $x$ and the current partial program $s$ . It then predicts a probability distribution $v$ over a fixed, finite set of tokens, the vocabulary, where each token may be a single Unicode character or a string of multiple characters. All common singleton characters are included in LLMs' vocabulary, ensuring that any standard program can be produced by concatenating tokens [60]. Next, based on distri", + "bbox": [ + 86, + 336, + 483, + 651 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Algorithm 1 Vanilla LLM-based code generation (without the blue highlights) vs. 
constrained decoding (with the blue highlights)", + "bbox": [ + 493, + 351, + 907, + 401 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Input: LLM, prompt $x$ , completion engine $CE_L$ for language $L$", + "bbox": [ + 493, + 406, + 905, + 438 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Output: Program $s$ such that $s \\in L$", + "bbox": [ + 495, + 440, + 796, + 456 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: initialize s", + "2: while true do", + "3: $\\pmb{v} := \\mathrm{LLM}(x \\circ s)$", + "4: while true do", + "5: $t\\sim \\pmb{v}$", + "6: if $CE_L(s \\circ t)$ then break", + "7: elif $t = EOS$ and $s \\in L$ then break", + "8: else $\\pmb{v}[t] := 0$ ; normalize $\\pmb{v}$", + "9: if $t = EOS$ then break", + "10: $s := s \\circ t$", + "11: return s" + ], + "bbox": [ + 502, + 456, + 901, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "bution $\\pmb{v}$ , a token $t$ is sampled (Line 5) and appended to the program $s$ (Line 10). This process is repeated until we encounter the special token EOS which signifies the end of the sequence (Line 9).", + "bbox": [ + 86, + 652, + 909, + 684 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLMs learn to predict adequate probability distributions from extensive training on natural and programming languages [12, 59, 73]. These distributions implicitly encode language rules, allowing LLMs to successfully solve code generation tasks [13, 28, 59]. However, LLMs may fail to infer complex rules [9, 21, 72], derive incomplete rules for less common languages [13, 51], and, due to the probabilistic nature of its generation procedure, not consistently follow formal language rules.", + "bbox": [ + 86, + 684, + 907, + 769 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Constrained Decoding. 
The aforementioned shortcoming of LLMs can be mitigated by employing constrained decoding, which analyzes the intermediate model outputs $s$ during the generation process and enforces that only valid tokens are incorporated. Specifically, constrained decoding leverages a completion engine $CE_{L}$ , specific to a language $L$ . Computing $CE_{L}(s)$ returns whether partial program $s$ can be completed to a well-formed program in $L$ , meaning whether there exists a (possibly empty) string $s'$ such that $s \\circ s' \\in L$ . Equivalently, $CE_{L}(s)$ determines whether $s$ belongs to the prefix language $L^{p}$ of $L$ , i.e., whether $s \\in L^{p}$ . $L^{p}$ is formally defined as follows:", + "bbox": [ + 86, + 776, + 909, + 894 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "DEFINITION 1. For a given language $L$ , its prefix language is $L^p \\coloneqq \\{s \\mid \\exists s': s \\circ s' \\in L\\}$ .", + "bbox": [ + 106, + 898, + 829, + 916 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "171:3", + "bbox": [ + 868, + 83, + 907, + 95 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/aed795b7a573ef796c6718dfe29dd3f1629d86f6914bb2b22785860942121b53.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
function is_int(text: string): boolean {<completion>VanillaSyntaxTypesDescription
const num = Number(text);(1) ;acceptrejectrejectSyntactically invalid
return !isNaN(num) &&(2) beracceptacceptrejectUndeclared identifier
parseInt(num <completion>(3) ()acceptacceptrejectDisallowed operator
(4), 10)(5).string()acceptacceptrejectInvalid argument type
acceptacceptacceptWell-formed option
", + "bbox": [ + 96, + 115, + 903, + 220 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 1. Left is a partial TypeScript program derived from instance #113 of the MBPP benchmark [5], awaiting completion. Right are five completion options: (1)-(4) are invalid and (5) is well-formed. Our type-constrained decoding is the only approach capable of correctly rejecting invalid completions and accepting the valid one.", + "bbox": [ + 86, + 227, + 907, + 275 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As illustrated in blue highlights of Algorithm 1, constrained decoding differs from vanilla LLM-based code generation by adding an additional sample-and-check loop at Line 4 around the token sampling process at Line 5. A sampled token $t$ is considered further only if $s \\circ t$ can be completed to a well-formed program (Line 6) or $t$ is EOS and $s$ is already well-formed in $L$ (Line 7). Otherwise, the probability of $t$ is set to zero at Line 8, and the sample-and-check loop repeats. Note that a token $t$ satisfying either Line 6 or Line 7 always exists, because $s$ is in $L^p$ and LLMs' vocabulary contains all common characters. Therefore, the number of iterations of the loop at Line 4 is bounded by the fixed LLM vocabulary size. In practice, only few iterations are needed (\\$5.3) and do not require additional LLM inference, ensuring a reasonable runtime overhead compared to vanilla decoding.", + "bbox": [ + 86, + 300, + 909, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The token-level guarantees extend inductively to guarantee the final program's validity with respect to $L$ . At Line 1, we start with a valid prefix in $L^p$ , i.e., either an empty string or a valid prefix provided in the user prompt. The check at Line 6 ensures that all intermediate outputs $s$ are prefixes in $L^p$ . Additionally, Line 7 and Line 9 ensure that the return statement in Line 11 is reached only if $s \\in L$ . 
As an additional benefit, by steering previously ill-formed generations into well-formed ones, constrained decoding also increases the likelihood of generating functionally correct code.", + "bbox": [ + 86, + 450, + 907, + 548 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that commonly used grammar and type checkers can not be used as a completion engine for constrained decoding. They judge whether a program string $s$ is well-formed according to the language $L$ , i.e., whether $s \\in L$ . When $s$ is not a complete program in $L$ , but a valid prefix in $L^p$ , they return a different output than $CE_L(s)$ , which is not suitable for use in Algorithm 1.", + "bbox": [ + 86, + 550, + 909, + 615 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Overview of Our Type Constraining Approach", + "text_level": 1, + "bbox": [ + 86, + 627, + 572, + 645 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Inadequacy of Syntax-Only Constraining. To apply the constrained decoding algorithm described in §2, one needs to choose a language $L$ and implement the completion engine $CE_{L}$ . Recent work has explored defining $L$ as the set of syntactically valid programs, thus leveraging the syntactic rules of programming languages for constrained decoding [8, 66, 71]. However, the benefits of this approach are limited, because syntax accounts for only a small portion of overall program correctness. For instance, across our evaluations (§5), only $3.5\\%$ of the functional errors and $6\\%$ of the compilation errors in LLM-generated code are due to syntactic errors.", + "bbox": [ + 86, + 650, + 909, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We illustrate this limitation using the example in Figure 1. It presents five completion candidates for a partial program: (1)-(4) will lead to compilation errors and only (5) can result in a well-formed program. 
Based on syntax, completions that contain line terminations or invalid characters (e.g., $) could be rejected (1). However, many other cases, including (2)-(4), do not break syntactic rules but still cause compilation errors. For instance, candidate (2) results in accessing an undeclared identifier. In candidate (3), the function call operator will fail at execution time, as num is a number and can not be called. Candidate (4) passes a value of unexpected format to parseInt, which expects the first argument to be a string. In this example, (4) is generated by CodeLlama 34B [59]. Syntax-only constraining accepts this invalid completion, leading to a non-compilable final output.", + "bbox": [ + 86, + 765, + 909, + 915 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "171:4", + "bbox": [ + 90, + 84, + 129, + 95 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 97 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our Approach: Leveraging the Type System. We require stronger constraints to effectively guide the model generation. Beyond syntax, type systems are commonly utilized in compilers, enforcing semantic rules to detect and reject bugs at compile time [23]. For Figure 1, the TypeScript type system would correctly reject code containing erroneous completions (2)-(4). Therefore, in this work, we propose leveraging type systems in constrained decoding to guide code generation. Our method accurately detects that only candidate (5) is a valid completion, guiding CodeLlama 34B to adopt this option and complete the program correctly. 
As detailed in §5, our experimental results demonstrate that our approach more than halves compiler errors in generated code and consistently increases the proportion of functionally correct programs.", + "bbox": [ + 86, + 118, + 909, + 266 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Incorporating typing rules into code generation offers substantial potential but presents a significant challenge. Previous research has focused primarily on constrained decoding for context-free languages, for which prefixes can be efficiently determined [8, 66, 71]. Type systems, however, require language specifications that exceed the capabilities of context-free grammars [43], inhibiting the direct application of prior techniques to type-constrained decoding. Moreover, determining whether a partially generated expression can be completed to be a well-typed full expression involves not only type checking and inference, as done in traditional compilers, but also addressing type inhabitation [39, 67].", + "bbox": [ + 86, + 268, + 909, + 400 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To address these challenges, we design and implement a practical approach to determine whether a string can be completed to a well-typed program. We begin by developing a specialized kind of non-deterministic automaton that maintains a prefix property, formally defined in §3.2. This property ensures that every reachable state can lead to an accepting state. We leverage this property to build a completion engine for constrained decoding as in Algorithm 1. We construct such a completion engine to enforce well-typedness for a simply-typed language $L_{B}$ in §3.3-§3.5 and extend it to a core subset of TypeScript in §4. At a high level, the automaton acts as a syntactic parser, additionally maintaining information about initialized variables, enclosing function declarations, and other type-related aspects of the partially parsed syntax tree. 
This is possible through dynamically created", + "bbox": [ + 86, + 400, + 909, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "annotated states that track the additional information.", + "bbox": [ + 86, + 551, + 549, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Figure 2, we provide a concrete example for our prefix automata. Every state represents the currently parsed syntactic component and additionally tracks the surrounding typing information. For example, after parsing the partial program in Figure 1, the automaton currently parses an expression as the first argument to function parseInt. Transitions are annotated with further code completions that are deemed admissible based on the syntax and typing information. In the first state, the automaton has parsed num, inferring from previous declarations that it represents an identifier of type number. Based on the signature of the parseInt function call, the required type of the completed", + "bbox": [ + 86, + 566, + 629, + 750 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg", + "image_caption": [ + "Figure 2. An example of a prefix automaton." + ], + "image_footnote": [], + "bbox": [ + 644, + 558, + 909, + 711 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "argument is string. The automaton now determines the admissible transitions from the identifier state. State transitions corresponding to completions (1)-(4) from Figure 1 are disallowed, as they are determined to violate type rules based on the tracked type information. Further, the automaton needs to determine which operations on the current expression num of type number can be applied to obtain an expression of type string. To achieve this, we develop a type reachability search algorithm, which finds string-typed expressions num.toString() and num.isFinite(). toString(). 
Therefore, it returns that accesses to members .toString and .isFinite are admissible, resulting in the two depicted transitions with the corresponding labels. In our experiment, CodeLlama 34B chooses to transition along .toString(), the more likely completion based on its training data. Note that in our actual automaton formalism, as described at the end of §3.2, state transitions are",
This type reachability search is invoked every time a partial expression is parsed, in order to determine valid transitions in the prefix automaton.", + "bbox": [ + 86, + 351, + 905, + 400 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg", + "image_caption": [ + "Figure 3. An example of a partial type search graph." + ], + "image_footnote": [], + "bbox": [ + 641, + 152, + 909, + 308 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We implement our approach for a significant subset of TypeScript (§4) and experimentally evaluate it for various LLMs and three important code generation tasks: synthesis, translation, and repair (§5). The results demonstrate that our approach provides significant benefits in both reducing compilation errors for LLM-generated code and increasing their functional correctness.", + "bbox": [ + 86, + 400, + 907, + 466 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Our Type Constraining Approach", + "text_level": 1, + "bbox": [ + 86, + 479, + 436, + 495 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we first present a generic, simply-typed language $L_{B}$ (§3.1). Then, we present our type constraining approach using $L_{B}$ . Specifically, we introduce our prefix automaton formalism (§3.2) and define increasingly complex automata for parsing well-typed fragments of $L_{B}$ , beginning with identifiers, literals, and types (§3.3), continuing to expressions, including type search for type-restricted expressions (§3.4), and concluding with statements (§3.5).", + "bbox": [ + 86, + 500, + 907, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 A SimplyTyped Language", + "text_level": 1, + "bbox": [ + 86, + 595, + 384, + 613 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We define a simply typed, Turing-complete language, $L_{B}$ . 
Its grammar and type system are generic, resembling the principles found in popular statically typed languages, such as TypeScript, Java, and Go. However, there may be a slight bias towards TypeScript, as our implementation is based on it.", + "bbox": [ + 86, + 616, + 909, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Syntax. The syntax of $L_{B}$ is shown in Figure 4. The language includes expressions, type-annotated variable and function definitions, and control flows. Overall, it is based on a core subset of TypeScript [10] but can be adapted for other statically typed languages. Similar to Bierman et al. [10], we represent Kleene-Star repetitions using an overline, e.g., $\\overline{s}$ represents a sequence of statements $s$ , and adhere to the TypeScript documentation to annotate parameter types in function signatures with argument names [17]. We make a distinction between base and extension expressions. The latter applies operators to previous expressions, leading to more complex expressions. This differentiation is useful later in §3.4 for constructing the prefix automaton for parsing expressions.", + "bbox": [ + 86, + 675, + 909, + 808 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Expression Typing Rules. The typing rules for $L_B$ 's expressions are detailed in Figure 5. These rules form a subset of safeFTS, a type-safe portion of TypeScript described by Bierman et al. [10], allowing us to leverage their soundness results. The type rules for $L_B$ use the standard concept of a type environment, denoted as $\\Gamma$ , which is a collection of pairs $(x : T)$ of identifiers $x$ and types $T$ . We write $\\Gamma \\vdash e : T$ if the expression $e$ has type $T$ in the type environment $\\Gamma$ . 
An expression $e$ is considered valid if its type can be derived by applying the given typing rules.", + "bbox": [ + 86, + 815, + 911, + 916 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "171:6", + "bbox": [ + 90, + 84, + 129, + 94 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/638ff536adeb7add76a966ada67900a19a856f6068152936b40b9f10840cbc7e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
l ::=Literalp ::= x : TTyped Identifier
\\d+Numeric Literal
"\\"w*”String LiteralT ::=Type
true | falseBoolean LiteralnumberNumeric Type
stringString Type
x ::= \\w+IdentifierbooleanBoolean Type
(¯p) => TFunction Type
e ::= e0 | e1Expression
e0 ::=Base Expressions ::=Statement
lLiterallet x : T;Variable Declaration
xIdentifiere;Expression Statement
(¯p) => eFunction Expressionreturn e;Return Statement
(e)Grouped Expression{¯s}Statement Block
e1 ::=Extension Expressionfunction x (¯p) : T {¯s}Function Definition
e ⊙ eBinary Operatorif (e) s else sIf-Then-Else Statement
e(¯e)Function Call
e.nMember AccessM ::=¯sProgram
", + "bbox": [ + 96, + 115, + 876, + 375 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Figure 4. The syntax of $L_{B}$ . Expressions are categorized into base and extension expressions. The later extends a given expression with suffix operators to form more complicated expressions.", + "bbox": [ + 86, + 384, + 907, + 416 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "_literals are evaluated to their respective types (LIT - {NUM, STR, BOOL}). Identifiers $x$ are evaluated based on the corresponding type in the type environment (IDENT). Anonymous functions are typed according to their annotated parameter types, with the return type determined by the returned expression (ANON). Grouping preserves the type of the inner expression (GROUP).", + "bbox": [ + 86, + 445, + 905, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Binary operators have predefined signatures $S_{1} \\odot S_{2}: T$ , such as number + number : number for addition and $T = T: T$ for assignments. These signatures must be satisfied in well-typed expressions (op). Function calls require parameters to match the function signature (CALL). The type of member accesses $e.n$ is determined using an auxiliary function LOOKUP(S, n), which fetches the type of member $n$ for type $S$ . An instantiation of LOOKUP for TypeScript is provided by Bierman et al. [10].", + "bbox": [ + 86, + 512, + 909, + 595 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Statements and Type Environments. The typing rules for statements are presented in Figure 6. Type environments are modified by statements, in particular variable declarations and function definitions. 
We use the notation $\\Gamma_1 \\vdash s \\rightarrow \\Gamma_2$ to indicate that after executing statement $s$ in type environment $\\Gamma_1$ , the new environment is $\\Gamma_2$ .", + "bbox": [ + 86, + 605, + 907, + 670 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Variable declarations introduce the identifier with declared type into the type environment, provided the identifier is not already defined (DECL). The type environment defines the context to evaluate expressions (EXPR) and return statements (RET). Return statements are only well-typed inside function bodies. The statements inside statement blocks and if-then-else statements must maintain valid type environments, but do not have an external effect (BLOCK, ITE). This also applies to function definitions; however, the defined function is finally added to the external type environment (FUN). Lastly, empty statements do not alter the type environment (NOP), while statement sequences propagate the type environment along the execution (SEQ).", + "bbox": [ + 86, + 670, + 909, + 805 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Return Types. The rules for checking return types are presented in Figure 7. Firstly, return statements must contain expressions matching the function's declared return type. Secondly, such an expression must be returned on every execution path. We use the notation $\\Gamma \\vdash \\overline{s} : R$ to indicate the sequence of statements $\\overline{s}$ ensures a return value of type $R$ .", + "bbox": [ + 86, + 813, + 907, + 880 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For variable declarations and expression statements, the return type of the subsequent statements is considered (R-DECL, R-EXPR). 
The return type of a return statement directly corresponds to the", + "bbox": [ + 86, + 881, + 907, + 915 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 493, + 95 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "171:7", + "bbox": [ + 868, + 83, + 907, + 94 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {L I T - N U M} \\right] \\frac {}{\\Gamma \\vdash \\backslash d + : n u m b e r} \\quad \\left[ \\mathrm {L I T - S T R} \\right] \\frac {}{\\Gamma \\vdash \" \\backslash w * \" : s t r i n g} \\quad \\left[ \\mathrm {L I T - B O O L} \\right] \\frac {}{\\Gamma \\vdash \\text {t r u e , f a l s e : b o o l e a n}}\n$$\n", + "text_format": "latex", + "bbox": [ + 90, + 119, + 917, + 145 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {I D E N T} \\right] \\frac {(x : T) \\in \\Gamma}{\\Gamma \\vdash x : T} \\quad \\left[ \\mathrm {A N O N} \\right] \\frac {\\Gamma \\cup \\overline {{p}} \\vdash e : T}{\\Gamma \\vdash (\\overline {{p}}) \\Rightarrow e : (\\overline {{p}}) \\Rightarrow T} \\quad \\left[ \\mathrm {C A L L} \\right] \\frac {\\Gamma \\vdash f : (\\overline {{x}} : \\overline {{S}}) \\Rightarrow T \\quad \\Gamma \\vdash \\overline {{e}} : \\overline {{S}}}{\\Gamma \\vdash f (\\overline {{e}}) : T}\n$$\n", + "text_format": "latex", + "bbox": [ + 106, + 154, + 903, + 193 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {G R O U P} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash (e) : T} \\quad \\left[ \\mathrm {O P} \\right] \\frac {\\Gamma \\vdash e _ {1} : S _ {1} \\quad \\Gamma \\vdash e _ {2} : S _ {2} \\quad S _ {1} \\odot S _ {2} : T}{\\Gamma \\vdash e _ {1} \\odot e _ {2} : T} \\quad \\left[ \\mathrm {M E M} \\right] \\frac {\\Gamma \\vdash e : S \\quad \\text {L O O K U P} (S , n) = T}{\\Gamma \\vdash e . 
n : T}\n$$\n", + "text_format": "latex", + "bbox": [ + 90, + 201, + 917, + 236 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 5. Typing rules for $L_B$ 's expressions.", + "bbox": [ + 329, + 247, + 664, + 263 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {D E C L} \\right] \\frac {x \\notin \\Gamma}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\rightarrow \\Gamma \\cup (x : T)} \\quad \\left[ \\mathrm {E X P R} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash e ; \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {R E T} \\right] \\frac {\\text {i n s i d e f u n c t i o n b o d y} \\quad \\Gamma \\vdash e : T}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\rightarrow \\Gamma}\n$$\n", + "text_format": "latex", + "bbox": [ + 90, + 315, + 915, + 350 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {B L O C K} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\leftrightarrow \\Gamma_ {B}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\nrightarrow \\Gamma} \\quad \\left[ \\mathrm {F U N} \\right] \\frac {x \\notin \\Gamma \\qquad \\Gamma \\cup (x : (\\overline {{p}}) = > T) \\cup (\\overline {{p}}) \\vdash \\overline {{s _ {x}}} \\nrightarrow \\Gamma_ {x}}{\\Gamma_ {1} \\vdash \\text {f u n c t i o n} x (\\overline {{p}}) : T \\{\\overline {{s _ {x}}} \\} \\nrightarrow \\Gamma \\cup (x : (\\overline {{p}}) = > T)}\n$$\n", + "text_format": "latex", + "bbox": [ + 152, + 358, + 853, + 395 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {I T E} \\right] \\frac {\\Gamma \\vdash s _ {i f} \\rightarrow \\Gamma_ {i f} \\quad \\Gamma \\vdash s _ {e l s e} \\leftrightarrow \\Gamma_ {e l s e}}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\leftrightarrow \\Gamma} \\quad \\left[ \\mathrm {N O P} \\right] \\frac {}{\\Gamma \\vdash \\bullet \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {S E Q} \\right] 
\\frac {\\Gamma_ {1} \\vdash \\bar {s} \\leftrightarrow \\Gamma_ {2} \\quad \\Gamma_ {2} \\vdash s \\leftrightarrow \\Gamma_ {3}}{\\Gamma_ {1} \\vdash \\bar {s} s \\leftrightarrow \\Gamma_ {3}}\n$$\n", + "text_format": "latex", + "bbox": [ + 100, + 402, + 905, + 441 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 6. Type environment extension rules for sequences of statements in $L_{B}$ .", + "bbox": [ + 195, + 452, + 798, + 469 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ R - D E C L \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\bar {s} : R} \\quad \\left[ R - E X P R \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash e ; \\bar {s} : R} \\quad \\left[ R - R E T \\right] \\frac {\\Gamma \\vdash e : R}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\bar {s} : R}\n$$\n", + "text_format": "latex", + "bbox": [ + 152, + 520, + 839, + 552 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {R - B L O C K - S E L F} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} : R \\quad \\Gamma \\vdash \\overline {{s}}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R} \\quad \\left[ \\mathrm {R - B L O C K - N E X T} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\quad \\Gamma \\vdash \\overline {{s}} : R}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 559, + 790, + 595 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {R - F U N} \\right] \\frac {\\Gamma \\cup (x : (\\bar {p} \\Rightarrow R)) \\vdash \\bar {s} : R ^ {\\prime} \\quad \\Gamma \\cup (x : (\\bar {p}) \\Rightarrow R) \\cup (\\bar {p}) \\vdash \\bar {s _ {x}} : R}{\\Gamma \\vdash \\text {f u n c t i o n} x (\\bar {p}): R \\{\\bar {s _ {x}} \\} \\bar {s} : R ^ {\\prime}}\n$$\n", + "text_format": "latex", + "bbox": [ + 213, + 602, 
+ 779, + 640 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\mathrm {R - I T E - S E L F} \\right] \\frac {\\Gamma \\vdash s _ {i f} : R \\quad \\Gamma \\vdash s _ {e l s e} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R} \\quad \\left[ \\mathrm {R - I T E - N E X T} \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 648, + 858, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 7. $L_{B}$ 's typing rules for function returns.", + "bbox": [ + 314, + 700, + 681, + 715 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "type of the returned expression (R-RET). For statement blocks, the return type is decided by either the block itself or the subsequent statements (R-BLOCK-SELF, R-BLOCK-NEXT). In function definitions, the return type is determined by the type of the subsequent statements, similar to expression statements. It is additionally required that the function body returns a type matching the declared return type (R-FUN). For if-then-else statements, both branches must return the same type (R-ITE-SELF), or the return type is determined by the following statements (R-ITE-NEXT).", + "bbox": [ + 86, + 763, + 909, + 865 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Language Definition. 
In summary, a program $s$ is in language $L_{B}$ if both (i) $s$ conforms to the grammar in Figure 4 and (ii) $s$ is well-typed according to the typing rules in Figures 5-7.",
The transition function $\\delta$ maps a given state to all possible subsequent states. When $\\delta$ is applied on a set of states $\\mathbf{q} \\subseteq Q$ , we take the union of the results as output, i.e., $\\delta(\\mathbf{q}, c) \\coloneqq \\bigcup_{q \\in \\mathbf{q}} \\delta(q, c)$ . The transition function defines a directed graph $G$ over $Q$ , where every state is a node and there is an edge annotated with $c$ from $q$ to $q'$ if $q' \\in \\delta(q, c)$ . The language parsed by $A$ comprises all strings $s$ such that traversing $G$ from some initial state in $I$ along the edges annotated with $c_i$ for $c_1 \\circ c_2 \\circ \\ldots \\circ c_n = s$ , it is possible to reach some accepting state in $F$ . Formally, we define recursively a traversal function $\\gamma$ for states $\\mathbf{q}$ as $\\gamma(\\mathbf{q}, \\varepsilon) \\coloneqq \\mathbf{q}$ and $\\gamma(\\mathbf{q}, s \\circ c) \\coloneqq \\delta(\\gamma(\\mathbf{q}, s), c)$ . The language accepted by $A$ is then defined as $L(A) \\coloneqq \\{s \\mid \\gamma(I, s) \\cap F \\neq \\emptyset\\}$ . The traversal function has two intuitive properties concerning reachability that can be shown inductively:", + "bbox": [ + 86, + 220, + 909, + 455 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(P1) A path along the graph can be split arbitrarily, i.e., $\\gamma (\\mathbf{q},s\\circ s^{\\prime}) = \\gamma (\\gamma (\\mathbf{q},s),s^{\\prime})$", + "(P2) If a state is reached by $s \\circ s'$ , some state is reachable by $s$ , i.e., $\\gamma(\\mathbf{q}, s \\circ s') \\neq \\emptyset \\Rightarrow \\gamma(\\mathbf{q}, s) \\neq \\emptyset$ ." 
+ ], + "bbox": [ + 94, + 456, + 907, + 491 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "An automaton satisfies the prefix property or is a prefix automaton, if there is a path from every reachable state to some accepting state, or formally:", + "bbox": [ + 86, + 493, + 909, + 527 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DEFINITION 2. For an automaton $A$ , the prefix property holds iff $\\forall q \\in \\gamma(I, s) : \\exists s' : \\gamma(q, s') \\cap F \\neq \\emptyset$ . The automaton is a prefix automaton if it satisfies the prefix property.", + "bbox": [ + 88, + 534, + 909, + 570 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Intuitively, for such automata, reaching some state by consuming string $s$ implies that $s$ is a prefix to some member of $L(A)$ . We define the reachable language of $A$ , all inputs that result in some state, as $L_r(A) := \\{s \\mid \\gamma(I, s) \\neq \\emptyset\\}$ . Below, we establish the equivalence of $L_r(A)$ and $L(A)^p$ , the prefix language of $L(A)$ as defined in Definition 1.", + "bbox": [ + 86, + 576, + 909, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "LEMMA 1. If $A$ is a prefix automaton, then $L(A)^p = L_r(A)$ .", + "bbox": [ + 106, + 651, + 590, + 669 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Proof. For any $s \\in L(A)^p$ there exists $s'$ such that $s \\circ s' \\in L(A)$ , by the definition of prefix languages. By the definition of $L(A)$ , this implies $\\gamma(I, s \\circ s') \\neq \\emptyset$ . Then, using (P2), we further derive $\\gamma(I, s) \\neq \\emptyset$ , i.e., $s \\in L_r(A)$ . Therefore, $L(A)^p \\subseteq L_r(A)$ holds. The other direction also holds. We first see that $s \\in L_r(A) \\implies \\gamma(I, s) \\neq \\emptyset$ . 
Then applying Definition 2 and (P1), we find $\\exists s': \\gamma(I, s \\circ s') \\cap F \\neq \\emptyset$ , implying $s \\circ s' \\in L(A)$ and thus $s \\in L(A)^p$ .", + "bbox": [ + 86, + 676, + 911, + 759 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Note that $L(A)^P \\subseteq L_r(A)$ holds generally for automata, since the first half of the proof does not require the prefix property.", + "bbox": [ + 86, + 766, + 909, + 802 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "From Prefix Automata to Completion Engines. With Lemma 1, given a prefix automaton $A$ , we can define a convenient-to-compute completion engine for the underlying language $L(A)$ : $CE_{L(A)}(s) \\coloneqq \\gamma(I, s) \\neq \\emptyset$ . Since our target language is $L$ and not $L(A)$ , we now need to determine the relationship between $L(A)$ and $L$ . If we construct $A$ such that it parses a subset of $L$ , i.e., $L(A) \\subseteq L$ , we are guaranteed that all LLM generations constrained by $CE_{L(A)}$ lie in $L$ . Conversely, if $L(A) \\supseteq L$ ,", + "bbox": [ + 86, + 808, + 911, + 894 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "171:9", + "bbox": [ + 868, + 83, + 907, + 95 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "Note that the prefix property defined in our work differs from the one discussed in classical texts, e.g., [31]", + "bbox": [ + 88, + 900, + 814, + 916 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "we are guaranteed that every string in $L$ can be expressed under constrained decoding, but not that every generation is valid. For example, if $A$ permits all syntactically correct programs, it guarantees that all well-typed programs can be generated, but permits ill-typed programs as well. 
Therefore, $L(A) \\subseteq L$ is required to achieve our goal of enforcing well-typedness on LLM-generated code. Ideally, $A$ would parse $L$ exactly, i.e., $L(A) = L$ , which in our setting additionally guarantees that every well-typed program can be expressed under the constraints of the completion engine. If this is not achieved, it is important for $A$ to capture a large subset of $L$ to be practically useful.", + "bbox": [ + 86, + 116, + 909, + 234 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Building a Prefix Automaton for $L_B$ : Warming up. In the next sections, we will construct a prefix automaton for soundly parsing well-typed programs in $L_B$ , by presenting various prefix automata for well-typed fragments of $L_B$ . Our final automaton will cover a significant but incomplete subset of $L_B$ . Incompleteness exists because to ensure that our algorithms terminate, we do not cover high-order types that are less likely to occur in practice. This is discussed in more detail in §3.4. Our evaluation in §5 empirically demonstrates that our approach sufficiently covers practical use cases to significantly improve the correctness of LLM-generated code.", + "bbox": [ + 86, + 244, + 907, + 359 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We choose $\\Sigma$ to be the set of Unicode characters. This makes our completion engine agnostic to LLM vocabularies. Even though LLMs' vocabularies differ, their tokens are always a string of single or multiple characters. When our completion engine for $L_{B}$ is called during constrained decoding, i.e., at Line 6 of Algorithm 1, it processes the sampled token character by character.", + "bbox": [ + 86, + 361, + 907, + 426 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Before proceeding, we briefly introduce several base prefix automata below, with their precise definitions detailed in Appendix A.1. 
These automata are later combined, with parts of the transition function being overwritten, to construct more complex automata that capture elements of $L_{B}$ .", + "bbox": [ + 86, + 427, + 905, + 477 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Union $A_X \\cup A_Y$ parses the language $\\{s \\mid s \\in L(A_X) \\cup L(A_Y)\\}$ . It is a prefix automaton if both $A_X$ and $A_Y$ are prefix automata.", + "- Concatenation $A_{XY}$ parses the language $\\{s \\circ s' \\mid s \\in L(A_X), s' \\in L(A_Y)\\}$ . It is a prefix automaton if $A_X$ and $A_Y$ are both prefix automata, and $L(A_Y) \\neq \\emptyset$ .", + "- Kleene-Star $A_{\\overline{X}}$ parses the language $\\{\\overline{s} \\mid s \\in L(A_X)\\}$ . It is a prefix automaton if $A_X$ is a prefix automaton.", + "- Terminal $A_{\\mathsf{S}}$ parses the language $\\{\\mathsf{S}\\}$ , where $\\mathsf{S}$ is a fixed, non-empty string.", + "- Empty $A_{\\emptyset}$ parses the empty language $\\varnothing$ and is always a prefix automaton." + ], + "bbox": [ + 106, + 481, + 903, + 613 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.3 Prefix Automata for Identifiers, Literals, and Types", + "text_level": 1, + "bbox": [ + 86, + 629, + 613, + 647 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We now introduce prefix automata for basic syntactic elements of $L_{B}$ : identifiers, literals, and type annotations. The languages parsed by these automata exactly match their counterparts in $L_{B}$ .", + "bbox": [ + 86, + 650, + 905, + 684 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "**Literals.** The prefix automaton for literals $A_{I} \\coloneqq A_{\\mathrm{NUM}} \\cup A_{\\mathrm{STR}} \\cup A_{\\mathrm{BOOL}}$ accepts number, string, and boolean literals as defined in Figure 4. 
The automata $A_{\\mathrm{NUM}}, A_{\\mathrm{STR}}$ , and $A_{\\mathrm{BOOL}}$ are defined by the deterministic finite automaton representation of the corresponding regular expression of the literal. To ensure the prefix property on the finite automata of the regular expression, we prune states from which accepting states can not be reached.", + "bbox": [ + 86, + 694, + 907, + 777 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Identifiers. During parsing, we maintain the current type environment $\\Gamma$ , as detailed in §3.5. We define the identifier automaton $A_{x}$ as the union of the terminal automata for identifiers defined in $\\Gamma$ . In other words, $A_{x} := \\bigcup_{y \\in \\Gamma} A_{y}$ .", + "bbox": [ + 86, + 787, + 907, + 838 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Types. The type automaton $A_{T}$ accepts type annotations as defined in the grammar of $L_{B}$ (Figure 4). It is defined as $A_{T} := A_{\\mathrm{TYPE - LIT}} \\cup A_{\\mathrm{TYPE - FUN}}$ . This includes type literal automaton $A_{\\mathrm{TYPE - LIT}} := A_{\\mathrm{string}} \\cup A_{\\mathrm{number}} \\cup A_{\\mathrm{boolean}}$ and function type automaton $A_{\\mathrm{TYPE - FUN}} := A_{(\\overline{p})} \\Rightarrow T$ . The latter is a concatenation of multiple prefix automata, with the parameter and return types recursing on $A_{T}$ . 
This recursive", + "bbox": [ + 86, + 847, + 909, + 915 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "171:10", + "bbox": [ + 90, + 83, + 137, + 95 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 95 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "definition is valid, since it ensures a finite set of initial states, defines a decidable accepting set, and preserves the prefix property.", + "bbox": [ + 86, + 116, + 905, + 151 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.4 Prefix Automaton for Expressions", + "text_level": 1, + "bbox": [ + 88, + 165, + 456, + 181 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We introduce prefix automata to parse well-typed expressions in $L_{B}$ . We begin by describing an automaton $A_{e}$ to parse expressions whose types are unrestricted, e.g., any expression $e$ in an expression statement $e$ ;. Then, we present an automaton $A_{e} \\downarrow T$ for expressions whose type is constrained to $T$ , e.g., for parameters of function calls. The type-constrained version accepts a string only if the inhabited type of the represented expression matches $T$ . To preserve the prefix property, we need to ensure that partial expressions can be completed to inhabit the constrained type. Completions may involve arbitrarily many applications of operators, which may modify the expression type. We therefore introduce a type search algorithm that soundly determines which types an expression can inhabit, and use it to prune transitions that violate the prefix property.", + "bbox": [ + 86, + 186, + 907, + 336 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Unrestricted Expressions. 
To handle the recursive syntactic structure of expressions, we differentiate two kinds as shown in Figure 4: base expressions, including identifiers, literals, grouped expressions, and anonymous functions, and extension expressions, which are operator applications (binary operator, member access, or function call) that lead to extending a given expression.", + "bbox": [ + 86, + 345, + 909, + 411 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The expression automaton $A_{e}$ is thus defined as the union of base expression automata $A_{x}, A_{l}, A_{(e)}$ , and $A_{(\\overline{p})} \\Rightarrow e$ , with potential extensions $A_{\\odot e}, A_{.n}$ , and $A_{(\\overline{e})}$ . The individual base and extension automata are constructed by concatenating the respective terminal automata and recursively $A_{e}$ . Additionally, we restrict the type of the recursive $A_{e}$ if the restriction is required by the type system, e.g., for parsing call parameters with a fixed type. We provide additional details on this restriction in Appendix A.2. Since an expression can end after either base or extensions, accepting states of both base and extending automata are accepting states of $A_{e}$ . To implement extensions, we start from the base expression automata and recursively adjust $A_{e}$ 's transition function $\\delta_{e}$ by adding outgoing edges from the accepting states of the current automaton to the initial states of the extending automata, or formally:", + "bbox": [ + 86, + 412, + 909, + 577 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\forall X, Y: \\delta_ {e} (q _ {Y} ^ {X}, c) := \\left\\{ \\begin{array}{l l} \\delta_ {Y} (q _ {Y} ^ {X}, c) \\cup \\delta_ {e} (I _ {(\\overline {{e}})} ^ {X}, c) \\cup \\delta_ {e} (I _ {\\odot e} ^ {X}, c) \\cup \\delta_ {e} (I _ {. 
n} ^ {X}, c) & \\text {i f q _ {Y} ^ {X} \\in F _ {Y}} \\\\ \\delta_ {Y} (q _ {Y} ^ {X}, c) & \\text {o t h e r w i s e ,} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 158, + 584, + 831, + 630 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where the labels $X$ and $Y$ for a state $q_{Y}^{X}$ represent that a string $X$ has been parsed, and currently the active automaton is $A_{Y}$ , which can be one of the following: $A_{x}, A_{l}, A_{(e)}, A_{(\\overline{p})} \\Rightarrow e, A_{\\odot e}, A_{.n}$ , and $A_{(\\overline{e})}$ . The superscripts are useful for tracking the currently expressed type, enabling us to determine the validity of extensions and transition to type-restricted expressions based on $L_{B}$ 's typing rules. For instance, for state $q^{42}$ , the addition operator extension $+e$ and function call extension $(\\overline{e})$ are syntactically applicable to 42 of type number. While the addition operator with type signature number + number :number is allowed, we can not apply a function call on number. In general, we set $I_{Y}^{X} := \\emptyset$ when $Y$ is an invalid extension to $X$ . Moreover, for the extension $+e$ to be valid, $e$ must be of type number. To this end, we transition to a type-restricted expression automaton by setting $I_{+e}^{42}$ to the set of initial states for $A_{+} \\circ (A_{e} \\downarrow \\text{number})$ . Similar to the recursive type automaton, our definition of $A_{e}$ ensures a finite set of initial states and a decidable accepting set.", + "bbox": [ + 86, + 638, + 907, + 820 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Type-Constrained Expressions. To implement $A_{e} \\downarrow T$ , we must determine whether a partial expression $s$ can be completed to inhabit type $T$ . 
Completing $s$ without any extension can lead to a possible set of types and repeated extensions can further alter the result type, but we are not guaranteed that the desired type can be reached. Moreover, extensions can be applied indefinitely, prohibiting an exhaustive search of possible completions.", + "bbox": [ + 86, + 831, + 909, + 915 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 90, + 81, + 495, + 95 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "171:11", + "bbox": [ + 862, + 83, + 905, + 94 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We therefore develop a two-tiered algorithm, which we describe in the following paragraphs. This algorithm first identifies the derivable types DERIVABLE $(q_{s})$ of $s$ based on its current state $q_{s}$ . DERIVABLE $(q_{s})$ refers to the set of inhabitable types for all possible expressions completed from $s$ without extension. Second, a type reachability search REACHABLE $(\\text{DERIVABLE}(q_{e}), T)$ is performed to determine if $T$ can be inhabited by extending from the derivable types of $s$ .", + "bbox": [ + 86, + 116, + 909, + 202 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We prune automaton transitions when this type search returns a negative result. To ensure the prefix property, the performed search is sound, i.e., it only returns a positive result if $T$ can be expressed by a valid sequence of extensions. This also aligns with our goal of generating only well-typed programs, ensuring that our expression automata accept a subset of all well-typed expressions of $L_{B}$ . To ensure termination, the search is incomplete, i.e., there may be a valid sequence of transitions to express $T$ which is not found by the search and we may end up disallowing generation of a well-typed expression. 
However, it only avoids traversing types of high complexity that are less likely to occur in practice. We further empirically ensure that our approach is practically effective (§5).", + "bbox": [ + 86, + 201, + 909, + 352 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Derivable Types. For the first part of the algorithm, we determine all types inhabitable by the currently parsed expression $s$ without extension, i.e., $\\mathrm{DERIVABLE}(q_s)$ . For example, while parsing partial identifier $x$ in the type environment $\\Gamma := \\{(x : \\text{number}), (xy : \\text{string})\\}$ , we have $\\mathrm{DERIVABLE}(q_x) = \\{\\text{number}, \\text{string}\\}$ and $\\mathrm{DERIVABLE}(q_{xy}) = \\{\\text{string}\\}$ . For a final state $q$ of expression $e$ , we define $\\mathrm{DERIVABLE}(q) := T$ , where $\\Gamma \\vdash e : T$ . Different expressions impose different", + "bbox": [ + 90, + 358, + 427, + 573 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "rules on derivability, and we present the detailed rules in Table 1. Note that for grouped expressions and function literals, we need to enumerate reachable types by recursively contained expressions. To avoid explicitly enumerating all reachable types, we integrate the derivability and reachability algorithms. This optimization is discussed in more detail in Appendix A.4.", + "bbox": [ + 86, + 573, + 909, + 641 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "LEMMA 2. For state $q \\in \\gamma(I_e, s)$ of partial expression $s$ , DERIVABLE( $q$ ) returns all $T$ s.t. exists some suffix $s'$ with $\\Gamma \\vdash s \\circ s': T$ and $s'$ does not involve an extension (operator, call, or member access).", + "bbox": [ + 88, + 650, + 909, + 684 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "PROOF. 
By case distinction on the possible states of partial expressions.", + "bbox": [ + 106, + 691, + 713, + 709 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Type Reachability. To determine which types are inhabitable by extending a base expression $e$ of a given type $T$ (with binary operator, function call, or member access), we analyze sequences of single extension steps with compatible signatures. This process is conceptualized as a search over a graph where types are nodes and extension steps are edges. For every binary operator $\\odot$ with the signature $T \\odot X : S$ , an edge is created from type $T$ to type $S$ . As an example, the operator for numerical addition $+$ has the signature number $+$ number: number, thereby forming an edge from number to itself. Furthermore, for every member $n$ of type $T$ , we create an edge from $T$ to $\\text{LOOKUP}(T, n)$ , e.g., from number to $() =>$ string for the member to string of number type. Finally, we connect each function type $(\\overline{p}) => R$ and with its return type $R$ . For instance, $() =>$ string is connected with string. Examples of type graphs can be found in §2.2 and Figure 3. Note that these extension steps are abstract, in the sense that they focus on the type of the expression being extended and the resulting type after extension, not considering textual representation and parameters.", + "bbox": [ + 86, + 715, + 909, + 916 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/72e91109dcf5c43d35127ada7978584ddc3a01d52f248a6f41efc03e3062ce21.jpg", + "table_caption": [ + "Table 1. Definition of DERIVABLE(x) for partial expressions introduced in Figure 4. $s \\leq s'$ expresses that $s$ is a prefix of $s'$ . pmatch(s, T) determines whether a prefix $s$ partially matches the regular expression of literals of type $T$ ." + ], + "table_footnote": [], + "table_body": "
sDERIVABLE(qs)
l{T | pmatch(l,T),T ∈ {number, string, boolean}}
x{T | x ≤ n, (n : T) ∈ Γ}
(¯p) => e{ (¯p) => T | REACHABLE(DERIVABLE(qe),T)}
(e{T | REACHABLE(DERIVABLE(qe),T)}
e ⊙{T | ∃S': Γ ↔ e : S ∧ S ⊕ S': T}
e({R | Γ ↔ e: (¯p) => R}
e.a{S | a ≤ n, Γ ↔ e : T, LOOKUP(T,n) = S}
", + "bbox": [ + 436, + 426, + 905, + 566 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "171:12", + "bbox": [ + 90, + 83, + 137, + 95 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 98 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/a30990f5fbd78a3d5bd03abb5291ed565e72139dbd4b176396fd845291662c23.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Algorithm 2 Our type reachability search algorithm
Input: Current type T of some expression e, goal type G
Output: Whether G can be reached by extending e
1: function REACHABLE(T, G)
2: if T = G then return true▷ The goal type is successfully found
3: if T is marked then return false else mark T▷ Type T is marked to avoid cycles
4: for each valid extension step ⌿ from T do
5: S := the resulting type of applying ⌿ on T
6: if PRUNESEARCH(T, G, S) continue▷ Prune the search to ensure termination
7: if REACHABLE(S, G) return true▷ Recurse to the next round of extension
8: return false▷ No suitable extension is found
", + "bbox": [ + 88, + 115, + 909, + 295 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The type reachability algorithm, Algorithm 2, implements a depth-first search over this type graph, starting from the current type $T$ , succeeding upon finding goal type $G$ (Line 2), marking any visited types to prevent cycles (Line 3). Then, it proceeds to iterate over all valid extension steps from $T$ (Line 4) and computes the resulting type $S$ after the extension step is applied (Line 5). In the conceptualized type graph, as described in the previous paragraph, this is equivalent to exploring all outgoing edges from $T$ . At Line 7, we proceed to recursively search if $S$ can reach $G$ . If all recursive calls are unsuccessful, the goal type can not be reached (Line 8).", + "bbox": [ + 86, + 320, + 909, + 437 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Some programming languages define self-referential default members, e.g., clone in Java or valueOf in TypeScript, which are nullary functions that return a value of the same type as the callee, $()\Rightarrow T$ for type $T$ . When these members are accessed in functions, higher-order functions can be derived indefinitely. For instance, for a function $f$ with type $()\Rightarrow S$ , $f.$ valueOf has the type $()\Rightarrow ()\Rightarrow S$ . We therefore need to restrict the type search to a finite set of types to ensure termination. At Line 6 of Algorithm 2, we add a heuristic PRUNESEARCH into the search, which decides where to prune the search process. We develop a simple heuristic based on the results from Gvero et al. [30]. This heuristic prunes exploration of types with higher complexity than goal or source type if they do not contain yet unexplored primitive types, thus preventing exploration of arbitrarily complex types. The details of this heuristic are presented in Appendix A.3. 
While ensuring termination, our heuristic leads to incompleteness and the potential rejection of well-typed expressions. However, this effect is less pronounced in practical usage, as only highly complex (thus less realistically used) types are avoided.", + "bbox": [ + 86, + 438, + 909, + 652 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We proceed to prove the soundness of Algorithm 2 below.", + "bbox": [ + 106, + 654, + 602, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "LEMMA 3. The type search in Algorithm 2 is sound, i.e., for any expression $e$ with $\\Gamma \\vdash e : T$ , if REACHABLE(T,G) holds, then there exists a sequence of extensions $y$ such that $\\Gamma \\vdash e \\circ y : G$ .", + "bbox": [ + 88, + 679, + 911, + 713 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Proof. By the design of Algorithm 2, if REACHABLE $(T,G)$ returns true, there is a sequence of $n$ recursive calls to REACHABLE $(T_i,G)$ , with $T_0 = T$ and REACHABLE $(T_n,G) = \\text{true}$ . Each $T_i$ ( $i > 0$ ) is derived because some extension $\\diamond_i$ is applicable to $T_{i-1}$ based on the typing rules of $L_B$ . We then convert each $\\diamond_i$ to its concrete, textual version $\\spadesuit_i$ . This representation includes the required well-typed parameters of $\\spadesuit_i$ (i.e., for binary operators and non-nullary functions), which are constructed using literals. Finally, we construct $y$ as $\\spadesuit_1 \\circ \\ldots \\circ \\spadesuit_n$ .", + "bbox": [ + 86, + 720, + 911, + 823 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Note that using any pruning heuristic at Line 6 of Algorithm 2 preserves soundness, which in turn is sufficient to preserve the required prefix property, as defined in Definition 2. We can conclude that the two-tiered search algorithm soundly determines whether the desired target type can be derived from some partial input. 
Therefore, we conclude that $A_{e} \\downarrow T$ and $A_{e}$ are prefix automata that parse a subset of well-typed expressions in $L_{B}$ .", + "bbox": [ + 86, + 831, + 911, + 916 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 98 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "171:13", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "COROLLARY 4. If REACHABLE( DERIVABLE(q), G) holds for any $q \\in \\gamma(I_e, s)$ of a partial expression $s$ , then there exists a suffix $s'$ such that $\\Gamma \\vdash s \\circ s': G$ .", + "bbox": [ + 90, + 116, + 907, + 151 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. This conclusion follows directly from Lemmas 2 and 3.", + "bbox": [ + 111, + 161, + 639, + 177 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "LEMMA 5. The language parsed by $A_e \\downarrow T$ is thus a subset of the expressions of $L_B$ of type $T$ , i.e., $L(A_e \\downarrow T) \\subseteq \\{s \\mid \\Gamma \\vdash s : T\\}$ . Since $A_e$ recursively involves $A_e \\downarrow T$ , the language parsed by $A_e$ is also a subset of well-typed expressions of $L_B$ , i.e., $L(A_e) \\subseteq \\{s \\mid \\exists T : \\Gamma \\vdash s : T\\}$ .", + "bbox": [ + 90, + 190, + 907, + 240 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3.5 Prefix Automata for Statements", + "text_level": 1, + "bbox": [ + 90, + 254, + 434, + 270 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We define the remaining automata to capture the complete language $L_{B}$ . The statement automaton is defined recursively as $A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}}$ . 
The declaration automaton $A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x: T}$ ; captures undefined variable names $x$ by accepting all strings, except for existing identifiers. This automaton is a prefix automaton since an accepting state can always be reached by appending characters to the declared identifier. The return statement automaton is $A_{\\emptyset}$ when outside a function and restricts the parsed expression to the return type of the surrounding function otherwise. The remaining automata are mainly concatenations of previously defined automata and recursive invocations of $A_{s}$ , with small variations detailed in Appendix A.5.", + "bbox": [ + 90, + 275, + 907, + 408 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Tracking Type Environments. Generally, we follow the typing rules in Figure 6. Identifiers are passed on through all state transitions, matching the rule SEQ, where the type environment of consecutive statements needs to be compatible. However, in the cases of BLOCK, ITE and FUN, we discard the local type environment after parsing, matching the respective typing rules. In FUN additionally, the function signature and parameters are added into the type environment of the function body automaton, and the function signature in the environment of subsequent statements.", + "bbox": [ + 90, + 416, + 907, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Guaranteeing Return Types. When parsing the body of a function, the transition function of the function automata $A_{\\mathrm{FUN}}$ maintains information about the declared return type and the encountered return statements (if any). $A_{\\mathrm{FUN}}$ only accepts states where all return values match the declared return type and all execution paths inside the function body return, following $L_B$ 's typing rules in Figure 7. If the current generated statements do not return in all execution paths, another statement is forced to be generated. 
Since we can always express the requested type through literals, a correct return statement can always be generated and the prefix automaton property is not violated.", + "bbox": [ + 90, + 526, + 907, + 643 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The described rules are straightforward to implement without violating the prefix property as all restrictions are derived only from already parsed input, e.g., the already defined identifiers or the previously declared function return type. We can therefore deduce that the statement automaton is a prefix automaton. Moreover, the automaton accepts all valid statements of $L_{B}$ , with the exception of well-typed expressions rejected by $A_{e}$ . Therefore the parsed language is a subset of $L_{B}$ .", + "bbox": [ + 90, + 643, + 907, + 727 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "LEMMA 6. With $A_M \\coloneqq A_{\\overline{s}}$ it holds that $A_M$ is a prefix automaton and $L(A_M) \\subseteq L_B$ .", + "bbox": [ + 111, + 736, + 783, + 752 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4 Extension to TypeScript", + "text_level": 1, + "bbox": [ + 90, + 766, + 345, + 784 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We extend our completion engine described in §3 to handle a core subset of modern TypeScript. In this section, we selectively discuss the implementation of several interesting TypeScript features. We provide a comprehensive list of supported and unsupported TypeScript features in Appendix B.", + "bbox": [ + 90, + 787, + 907, + 838 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Constant Variable Declarations. In addition to variable declaration using let, TypeScript supports constant declarations using const. This defines immutable identifiers. 
We thus additionally track mutability of each identifier in the type environment and disallow applying the assignment operator to immutable identifiers.", + "bbox": [ + 90, + 847, + 907, + 913 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "171:14", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Arrays. We add support for array type annotation, parsing array expressions, and reading from and assigning to array fields. In array expressions, we enforce that all array elements have the same type. Moreover, array types introduce another dimension of type nesting. Therefore we adapt the type reachability pruning heuristic to handle this additional dimension to ensure termination.", + "bbox": [ + 86, + 118, + 905, + 184 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "**Loops.** TypeScript supports various loop constructs, including for, while, do-while, and for...of loops. These are implemented mostly as variations of the statement block parser. The for...of loop uniquely constrains the right-hand side of the of operator to an array of any type. To adapt the type search, we introduce a generic array type $\bullet[]$ , which matches any array type. For example, both types number[] and string[] match $\bullet[]$ in Line 2 of Algorithm 2.", + "bbox": [ + 86, + 191, + 909, + 276 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Additional Operators and Types. We add several arithmetic and logic operators, such as modulo $\%$ , exact equality $===$ , logical or $||$ , and the ternary operator $\text{?}$ :. To handle these operators, we add additional edges to the type search graph. 
Moreover, we add support for post- and prefix operators such as -- and ++, which are only valid extensions to mutable expressions.", + "bbox": [ + 86, + 284, + 909, + 351 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Operator Precedence. TypeScript defines an operator precedence, which determines the implicit grouping of expressions. For example $1 + 2$ . toString() is parsed as $1 + (2$ . toString()) . We adapt our expression parsing algorithm in two places to handle operator precedences. First, in the expression automaton, we leverage the knowledge about previously parsed extensions to determine the implicit grouping and thus where the next operator is applied. For example, for state $q^{1} + 2$ , the member access extension $n$ is applied to 2, as opposed to $1 + 2$ . Second, we adapt the type search in Algorithm 2. Concretely, we ensure that only extensions that can be validly applied based on operator precedence are iterated over. For this, we track the operator precedence of previously parsed extensions and extensions considered during the traversal of the type graph and omit operators in Line 5 that violate operator precedence.", + "bbox": [ + 86, + 358, + 909, + 525 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Global Identifiers and Imports. In TypeScript, many identifiers are defined globally and available in any execution. These global identifiers are incorporated by initializing the type environment of the program automaton accordingly. Identifiers such as Math introduce additional types, which we additionally implement. We also model the import of the crypto library using require.", + "bbox": [ + 86, + 533, + 909, + 600 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Polymorphic Built-In Members. The TypeScript LOOKUP implementation defines a few polymorphic members for built-in types. For example, for array $\\times$ of type $T[]$ , $x$ . 
map(f) takes a callback function $f$ and returns a new array $[f(x[0]), f(x[1]), \\ldots]$ . If $f$ has type $(T) => P$ , the returned array has type $P[]$ . Here $P$ is a type parameter, which is instantiated by matching the type of the passed function to the type pattern.", + "bbox": [ + 86, + 608, + 909, + 691 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We support such polymorphisms by adapting the type search. We track type patterns and enforce that type parameters are instantiated before the goal type is reached. We then continue the search from the instantiated version. In the map example, when searching completions of x.map, we first search for functions that instantiate the type parameter, and then continue the search from the instantiated type. When anonymous functions are generated as call parameters, we enforce that the function matches the searched type pattern.", + "bbox": [ + 86, + 691, + 909, + 791 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Type Annotations. TypeScript is designed to be flexible, allowing many type annotations to be omitted when they can be automatically inferred. We generally support this, such as inferring types from initial values. However, it can lead to unexpected types when annotations are omitted, often confusing even experienced developers [47, 48]. Moreover, in the context of LLM-based code generation, having more type annotations can provide valuable information for both the model and our type-constraining algorithms. We have identified three situations where generated code often fails to compile without type annotations, prompting us to enforce them. 
First, we require", + "bbox": [ + 86, + 800, + 909, + 916 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 90, + 81, + 495, + 95 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "171:15", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "annotations for all function parameters and return types. Second, all variable declarations must either have a type annotation or be initialized with a value. Third, we enforce type annotations for the first parameter of anonymous functions used as callbacks in the polymorphic built-in member reduce. These constraints trade off practical correctness with theoretical language completeness.", + "bbox": [ + 90, + 116, + 905, + 184 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5 Experimental Evaluation", + "text_level": 1, + "bbox": [ + 90, + 198, + 355, + 213 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We present an extensive evaluation of our type constraining approach on a variety of tasks and models. We outline our experimental setup (§5.1), evaluate the impact on compilation errors and functional correctness (§5.2), perform runtime analysis (§5.3), and present case studies (§5.4).", + "bbox": [ + 90, + 219, + 905, + 268 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 90, + 283, + 323, + 300 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We now outline our main evaluation setup, covering implementation, evaluated tasks, considered models, compared methods, and metrics. We provide further setup details and hyperparameter choices in Appendix B.", + "bbox": [ + 90, + 302, + 905, + 352 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Implementation. Our implementation is written in Python and contains 11249 lines of code. 
To ensure robust implementation, we built a large set of around four hundred unit tests and frequently compared the behaviors of our implementation with the official TypeScript compiler [42].", + "bbox": [ + 90, + 362, + 905, + 411 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Tasks and Benchmarks. We evaluate three relevant tasks of code generation:", + "bbox": [ + 90, + 422, + 765, + 437 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Synthesis: Given a natural language task description and a function header, the task is to generate a solution from scratch.", + "- Translation: Given a function written in Python and the header of an equivalent TypeScript function, the task is to generate the body of the equivalent function in TypeScript.", + "- Repair: Given a natural language task description, a non-compilable solution, the corresponding compiler error, and the function header, the task is to restore functionality of the flawed solution by resolving the compilation error." + ], + "bbox": [ + 109, + 443, + 905, + 558 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The benchmarks for these tasks are based on TypeScript-translated tasks from HumanEval [12] and MBPP [5], contained in the MultiPL-E dataset [13], with 159 and 384 instances each. We observe that success in generating valid code for the same sample can vary depending on the random seed used. To obtain more comprehensive results on the small HumanEval dataset, we generate each sample 4 times with different seeds and aggregate the outcomes. In MBPP, we generate each sample once. For Repair, we collect all non-compiling programs from the unconstrained synthesis task for all models, resulting in 292 and 248 instances for HumanEval and MBPP each.", + "bbox": [ + 90, + 563, + 905, + 679 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Models. 
We use 6 different open-weight LLMs, covering 3 LLMs of varying parameter sizes from the same model family and 4 models of a similar size from different model families: the Gemma 2 model family with 2B/9B/27B parameters [64], DeepSeekCoder 33B (abbreviated as DSCoder 33B) [28], CodeLlama 34B [59], and Qwen2.5 32B [73]. For all evaluated LLMs we choose the instruction-tuned variants, which are fine-tuned to follow instructions in a chat-style interaction, such that they adequately attempt to resolve the presented tasks.", + "bbox": [ + 90, + 688, + 905, + 787 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Compared Methods. We run unconstrained LLM sampling, reported as Vanilla. We measure the upper bound improvement of prior syntactic constraining methods [8, 57, 66] by assuming that all syntactically incorrect instances generated by Vanilla could be compiled under syntactic constraining. We refer to this improvement as idealized Syntax. We separately sample using type-constrained decoding based on our completion engine introduced in §3 and §4, and report it as Types. Due to the size and complexity of the full TypeScript compiler, featuring over 427,105 lines of code in 698 files [42], our extension does not cover all features of TypeScript. We therefore", + "bbox": [ + 90, + 798, + 905, + 913 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "171:16", + "bbox": [ + 92, + 84, + 135, + 95 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/9fdda853962702543e4b74b4a4950ce8a8bb257496566563d2784d4fed07cc6e.jpg", + "table_caption": [ + "Table 2. Number of instances with compiler errors in unconstrained generation (Vanilla), idealized syntax-only constraining (Syntax), and our proposed type constraining (Types). 
Type constraining reduces compiler errors by $74.8\\%$ and $56.0\\%$ in the synthesis of HumanEval and MBPP problems respectively, compared to only $9.0\\%$ and $4.8\\%$ ideal improvement on the two datasets respectively through syntax-only constraining." + ], + "table_footnote": [], + "table_body": "
ModelSynthesisTranslationRepair
VanillaSyntaxTypesVanillaSyntaxTypesVanillaSyntaxTypes
HumanEvalGemma 2 2B10392↓10.7%44↓57.3%177149↓15.8%80↓54.8%194181↓6.7%103↓46.9%
Gemma 2 9B4541↓8.9%13↓71.1%7563↓16.0%16↓78.7%113108↓4.4%52↓54.0%
Gemma 2 27B1513↓13.3%2↓86.7%2020↓0.0%3↓85.0%4540↓11.1%22↓51.1%
DS Coder 33B2625↓3.8%5↓80.8%1817↓5.6%7↓61.1%3636↓0.0%15↓58.3%
CodeLlama 34B8671↓17.4%28↓67.4%158124↓21.5%59↓62.7%153142↓7.2%48↓68.6%
Qwen2.5 32B1717↓0.0%2↓88.2%2421↓12.5%5↓79.2%3634↓5.6%13↓63.9%
MBPPGemma 2 2B6764↓4.5%27↓59.7%126111↓11.9%79↓37.3%194184↓5.2%108↓44.3%
Gemma 2 9B3029↓3.3%10↓66.7%6761↓9.0%33↓50.7%129124↓3.9%63↓51.2%
Gemma 2 27B2019↓5.0%7↓65.0%3736↓2.7%22↓40.5%7169↓2.8%32↓54.9%
DS Coder 33B3232↓0.0%19↓40.6%2927↓6.9%13↓55.2%9090↓0.0%43↓52.2%
CodeLlama 34B8071↓11.2%41↓48.8%126114↓9.5%54↓57.1%157148↓5.7%76↓51.6%
Qwen2.5 32B1918↓5.3%13↓31.6%2222↓0.0%16↓27.3%5552↓5.5%29↓47.3%
", + "bbox": [ + 90, + 181, + 903, + 430 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "emulate a type constraining that supports the entire TypeScript feature set. Concretely, if a sample compiles correctly without any constraining, we report it as-is. Otherwise, we report the result of a constrained resample. For all methods, if generation takes more than 300 seconds, we report the partial program generated until the timeout.", + "bbox": [ + 86, + 459, + 907, + 527 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Metrics. We compute two main metrics to assess the effectiveness of the compared methods. First, we determine the number of compiler errors in model-generated outputs. We count as a compiler error any case in which the TypeScript compiler [42] reports an issue during compilation. To measure functional correctness, we leverage the pass@1 metric [14], which measures the percentage of code generations that pass the provided unit tests given only one trial.", + "bbox": [ + 86, + 536, + 909, + 620 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.2 Results on Compilation and Functional Correctness", + "text_level": 1, + "bbox": [ + 88, + 634, + 623, + 650 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we present our experimental results, showing that on all three code-generation-related tasks, our type constraining approach significantly improves the considered LLMs in generating both compileable and functionally correct code. It also substantially outperforms syntax-only constraining.", + "bbox": [ + 86, + 655, + 909, + 722 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Reduction of Compilation Errors. In Table 2, we present the number of compilation errors produced by each compared method. 
For synthesis and translation, in the unconstrained setting (Vanilla), on average only $9.0\\%$ and $4.9\\%$ of the non-compiling instances in HumanEval and MBPP respectively are due to syntactic errors (Syntax), with Qwen2.5 32B even making no syntax errors at all for HumanEval synthesis and MBPP translation. In contrast, type constraining reduces compilation errors by more than half, i.e., by $75.3\\%$ and $52.1\\%$ on HumanEval and MBPP respectively. We observe that models across all sizes and families benefit similarly from our constraining, with a minimum error reduction of $54.8\\%$ and $27.3\\%$ on HumanEval and MBPP respectively, highlighting the general effectiveness of our approach.", + "bbox": [ + 86, + 731, + 907, + 880 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A straightforward way to improve successful compilation of LLM-generated code is to feed the erroneous code and the error message back to an LLM for correction – our repair task. Thanks", + "bbox": [ + 86, + 881, + 907, + 913 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 90, + 81, + 495, + 97 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "171:17", + "bbox": [ + 862, + 83, + 905, + 95 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/946af2491169f3cdaa486ef0392824c733f75141a4c823922253fc196de5d150.jpg", + "table_caption": [ + "Table 3. pass@1 of unconstrained generation (Vanilla) and type constraining (Types). The benefit of our type-constraining approach transfers from reduced compilation errors to improved functional correctness." + ], + "table_footnote": [], + "table_body": "
ModelSynthesisTranslationRepair
VanillaTypesVanillaTypesVanillaTypes
HumanEvalGemma 2 2B29.130.2↑3.8%50.253.9↑7.5%11.620.9↑79.4%
Gemma 2 9B56.658.3↑3.1%73.778.3↑6.2%24.034.9↑45.7%
Gemma 2 27B69.571.2↑2.5%86.687.7↑1.3%38.441.1↑7.1%
DS Coder 33B68.971.1↑3.2%88.790.1↑1.6%47.650.7↑6.5%
CodeLlama 34B41.043.4↑5.7%58.663.5↑8.3%17.527.4↑56.9%
Qwen2.5 32B79.681.8↑2.8%92.193.9↑1.9%65.471.2↑8.9%
MBPPGemma 2 2B40.442.4↑5.2%52.356.0↑7.0%12.122.6↑86.7%
Gemma 2 9B65.467.4↑3.2%71.475.8↑6.2%24.231.9↑31.7%
Gemma 2 27B70.672.1↑2.2%83.184.4↑1.6%39.145.2↑15.5%
DS Coder 33B65.467.2↑2.8%85.989.1↑3.6%35.143.1↑23.0%
CodeLlama 34B42.245.6↑8.0%55.763.3↑13.6%15.726.6↑69.2%
Qwen2.5 32B76.376.6↑0.3%89.690.4↑0.9%48.054.0↑12.6%
", + "bbox": [ + 102, + 150, + 893, + 397 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "to its general applicability, our type constraining approach can also enhance this process. Our experimental results in the setting of code repair are also depicted in Table 2. We find that, in the vanilla setting, many models struggle to correctly localize and resolve compilation errors, with Gemma 2 2B for example repairing only $33.5\\%$ and $25.8\\%$ of the non-compiling HumanEval and MBPP instances, respectively. This is substantially increased to $56.4\\%$ and $58.4\\%$ through type constraining. On average, using type-constrained sampling, $53.7\\%$ more compilation errors are resolved than using vanilla LLM decoding.", + "bbox": [ + 90, + 423, + 905, + 538 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Improving Functional Correctness. Programs that do not compile are always functionally incorrect. With our type constraining method, non-compilable generations can be turned into well-formed ones, offering the possibility of achieving functional correctness. In Table 3, we experimentally show that type constraining universally improves the functional correctness of LLM-generated code. On the three tasks considered, employing type constraining improves LLMs' pass@1 rate, achieving an average increase by $3.5\\%$ in synthesis, $5.0\\%$ in translation, and $37.0\\%$ in repair tasks. The larger improvement in the latter is due to vanilla LLMs generally struggling to generate functionally correct code. One interesting phenomenon is that, for stronger models,", + "bbox": [ + 90, + 547, + 907, + 681 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "constraints more likely lead to recovering functionally correct code. For example on the synthesis task, for Gemma 2 27B, out of the 26 instances that required resampling to compile successfully, 17 are also functionally correct. 
For Qwen2.5 32B, 15 out of 21 such instances were correct.", + "bbox": [ + 90, + 681, + 561, + 779 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.3 Runtime Analysis", + "text_level": 1, + "bbox": [ + 90, + 794, + 304, + 811 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "As discussed in §2, compared with vanilla LLM decoding, our constrained decoding algorithm runs an additional loop (Line 4 of Algorithm 1), where tokens are sampled from an LLM-produced next-token probability distribu", + "bbox": [ + 90, + 815, + 561, + 880 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "tion and checked against the completion engine. In this section, we investigate how this process introduces additional runtime overhead for our type constraining. Note that for each selected token,", + "bbox": [ + 90, + 881, + 905, + 913 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/743d1ede4fcce2829efeee991f239fd26c6755ecff1cb142a39893bdf51e8ec9.jpg", + "table_caption": [ + "Table 4. Median time per synthesis instance in seconds spent by our type-constrained decoding and its relative increase compared with unconstrained decoding (Vanilla)." + ], + "table_footnote": [], + "table_body": "
ModelHumanEvalMBPP
Gemma 2 2B6.7↑38.3%6.3↑35.4%
Gemma 2 9B8.3↑29.2%9.5↑46.8%
Gemma 2 27B11.7↑19.9%11.7↑32.8%
DS Coder 33B11.5↑36.2%9.4↑59.5%
CodeLlama 34B7.6↑40.8%7.0↑37.6%
Qwen2.5 32B7.3↑39.6%4.9↑54.8%
", + "bbox": [ + 578, + 750, + 903, + 875 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "171:18", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "vanilla and constrained decoding both run LLM inference only once, meaning that there is no extra overhead from LLM inference in constrained decoding.", + "bbox": [ + 86, + 118, + 907, + 152 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Overhead of Type Constraining. For an application of our method in practice, the effective runtime increase due to constrained decoding is highly relevant. To assess it, we measure the runtime per synthesis instance in HumanEval and MBPP for both unconstrained and type-constrained decoding. We report the median runtime per instance for type constraining and its relative increase to unconstrained decoding in Table 4. On average over the evaluated models, we observe a relative increase of $39.1\\%$ and $52.1\\%$ in HumanEval and MBPP respectively. We consider this impact to be bearable for the observed significant decrease in compilation errors. Moreover, this is measured on an unoptimized, Python-based implementation and could be significantly improved by a more system-oriented implementation, such as the one proposed by Dong et al. [18].", + "bbox": [ + 86, + 162, + 909, + 313 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Number of Sample-and-Check Loop Iterations. To provide an in-depth analysis of the overhead of our type constraining method, we measure the number of iterations spent by the sample-and-check loop to find an admissible token. The results are provided in Figure 8. We observe that the number of loop iterations follows a long-tail distribution. For $99.4\\%$ of cases, only one loop iteration is needed. 
This number is even higher for stronger models, with Gemma 2 9B and 27B requiring one iteration in $99.6\\%$ and $99.9\\%$ of cases, respectively. This means that, in most instances, LLMs can generate a valid token on the first attempt, which is then verified by the completion engine. In cases where more than one iteration is needed, the completion engine intervenes to guide the selection", + "bbox": [ + 86, + 320, + 567, + 552 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg", + "image_caption": [ + "Figure 8. Histogram on the number of iterations consumed by the sample-and-check loop at Line 4 of Algorithm 1 to find a valid token, measured with Gemma 2 2B for HumanEval synthesis." + ], + "image_footnote": [], + "bbox": [ + 584, + 327, + 899, + 470 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "of valid tokens. These interventions help resolve errors in many instances in our benchmarks, providing significant benefit, as discussed in §5.2.", + "bbox": [ + 86, + 554, + 909, + 586 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Prior work [8, 57, 66] implemented constrained decoding differently than Algorithm 1. Instead of running the sample-and-check loop, they execute the completion engine for all tokens in the LLM's vocabulary, mask out all invalid tokens, and sample once from the remaining valid tokens based on their normalized likelihoods. This implementation is less efficient than ours, especially when calling the completion engine is costly. Based on the long-tail distribution depicted in Figure 8, our implementation requires only a single invocation to the completion engine for most cases, whereas their implementation calls it for every token in the LLM's vocabulary for all cases. In our experiments, their implementation results in timeouts for all benchmark instances, with a timeout of 5 minutes per instance. 
This justifies our choice of using the sample-and-check loop.", + "bbox": [ + 86, + 587, + 911, + 737 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "5.4 Case Study", + "text_level": 1, + "bbox": [ + 88, + 751, + 246, + 766 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For a qualitative evaluation, we manually inspect instances where unconstrained decoding fails and our type constraining approach successfully corrects errors. We find that our technique effectively amends various types of compilation errors. We showcase three such examples in Figure 9.", + "bbox": [ + 86, + 770, + 909, + 823 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Wrong Parameter Count. In Figure 9a, the task is to find all words in a string that contain a number of consonants. When Gemma 2 2B attempts to solve this problem, translating from the Python version to TypeScript, it calls split without arguments. This is allowed in Python, but not in TypeScript, where a single string argument is required. Type constraining correctly detects this and guides the generation to sample a space character as parameter.", + "bbox": [ + 86, + 831, + 909, + 915 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 90, + 81, + 495, + 97 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "171:19", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/bf0afb47f1cfa0e9f681afec1348c6553d11edd6b8f05b746402a2fb18e6f6cd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
VanillaTypes
(a)// find all words with consonants function select_words(s: string): string[] { for (const word of s. split()// find all words with consonants function select_words(s: string): string[] { for (const word of s. split(' ')
(b)function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } // No return }function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } return 1; }
(c)// insert a delimiter between every element return nums.reduce( (acc, curr, index) => { acc.pushurr); return acc; }, [])// insert a delimiter between every element return nums.reduce( (acc: number[] , num, index) => { acc.pushurr); return acc; }, [])
", + "bbox": [ + 98, + 115, + 903, + 338 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 9. Three examples illustrating the effect of type-constrained sampling. Left are unconstrained generations with problematic tokens highlighted in red, and right are type-constrained results with corrected tokens highlighted in green, adapted for clarity. In (a), Gemma 2 2B attempts to call split, missing required arguments. In (b), DeepSeekCoder 33B attempts to complete a function without a guaranteed return. The issue is resolved by forcing generation of another statement after the main loop. In (c), Gemma 2 9B calls reduce with an anonymous function without type annotation. This leads to an incorrect type inference for the first parameter. The issue is solved by guiding the model to add type annotation.", + "bbox": [ + 86, + 345, + 905, + 455 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Missing Return Statement. In Figure 9b, to complete function largest_divisor, the model must compute a straightforward divisor loop. DeepSeekCoder 33B Instruct [28] implements a correct loop, but does not guarantee returning a value in every execution path. When the return statement in the loop is never executed, e.g., for negative inputs, the function thus returns undefined, violating the type rules. Our method detects this issue and forces the generation of another statement in the function body, resulting in a correct fallback return statement.", + "bbox": [ + 86, + 490, + 905, + 588 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Incorrect Type Inference. In Figure 9c, the task is to insert a delimiter between every element in an array. Gemma 2 9B solves this with the reduce function. This generic function accepts two arguments; first, a callback function that is called consecutively for every element in the array and accumulates a result, second, an initial value for the callback function. 
The type of the accumulator of the callback is derived implicitly from the second argument, which is an empty array in the given example. TypeScript infers special type never[] for the empty array, disallowing inserting curr of type number through push. Therefore, the program fails to compile. This issue is a well-known limitation of the TypeScript compiler, often confusing even expert developers [47, 48]. Our method resolves it by enforcing adequate type annotation on the first argument of the callback function.", + "bbox": [ + 86, + 601, + 905, + 750 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "6 Discussion", + "text_level": 1, + "bbox": [ + 88, + 766, + 224, + 780 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Our general type constraining approach, backed by strong experimental results, opens exciting avenues for future research, which we discuss below.", + "bbox": [ + 86, + 786, + 907, + 818 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Implementation Effort. Developing a completion engine for a target programming language currently requires manual efforts. However, we expect that the involved effort to adopt our method to other languages will be reduced significantly, as many features transfer from our implementation for $L_{B}$ and TypeScript. Moreover, we believe that, due to the huge impact on LLM's code generation, the effort will pay off. 
Future programming language developers may consider generally writing", + "bbox": [ + 86, + 831, + 909, + 913 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "171:20", + "bbox": [ + 92, + 84, + 133, + 94 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 905, + 95 + ], + "page_idx": 19 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "function sort_threel(number[],r:number[]):number[]{ \nfor(let $\\mathrm{i} = 0$ ;i<1.length; $\\mathrm{i + + }$ ){ \nr.push(l[i].toString().slice(0,3).concat(l[i].ToString().split())'.split').reverse() .join(')).split''.reverse().join('').ToString() $^+$ l[i].ToString().slice(3).split')…", + "guess_lang": "javascript", + "bbox": [ + 94, + 120, + 899, + 184 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 10. Complications errors remain when the model does not terminate after a corrected token. In this example for synthesis on the HumanEval task #33, CodeLlama 34B is steered away from accessing non-existing member .sort and instead accesses .string.", + "bbox": [ + 86, + 190, + 909, + 238 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "their compilers as an incremental completion engine, which additionally enables automatic adoption for constrained code generation, besides conventional grammar parsing and type checking.", + "bbox": [ + 86, + 273, + 907, + 308 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Broader Application to More Complex Tasks and Stronger LLMs. Stronger LLMs, such as the latest OpenAI models [33], may make fewer typing errors on the HumanEval and MBPP datasets. Our evaluation results in Table 2 also demonstrate that compilation errors decrease with increasing model size for the Gemma family. 
However, recent findings showed that currently, even the strongest LLMs struggle with generating compilable code for more complex coding tasks, stricter typing rules, and low-resource languages (e.g., new DSLs). Gusanidas [29] evaluated various state-of-the-art LLMs on difficult code synthesis tasks in Rust, reporting compilation error rates of $18\\%$ for OpenAI o1-mini [33], $39\\%$ for DeepSeek R1 [15] and $27\\%$ for Anthropic's Claude 3.5 Sonnet [2]. For OCaml and Haskell, which are sparsely represented in LLMs' training data, the error rate is even higher at $40\\% - 60\\%$ for all models, matching a trend of worse performance on low-resource languages [24, 36]. Pan et al. [54] compiled a large dataset of code translation and found $44.3\\%$ of GPT-4-generated code to contain compilation errors. Similarly, Shetty et al. [61] report around $25\\%$ compilation errors for C-to-Rust translation using OpenAI o1 models. Our type constraining approach is broadly applicable to all these scenarios and our work presents a promising proof of concept. Future work can consider building upon our approach to address these challenges.", + "bbox": [ + 90, + 316, + 909, + 566 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Constrained decoding in general requires access to the next-token probability distributions produced by LLMs. Currently, commercially available black-box LLM APIs only return sampled tokens and do not offer complete next-token distributions. A possible solution is to integrate our method into the backend of model providers, as was recently implemented for guaranteeing adherence to JSON Schemas [3, 50].", + "bbox": [ + 86, + 566, + 909, + 650 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Remaining Compiler Errors. We observe that, even though constrained decoding guarantees a valid result upon termination, a considerable amount of compilation errors remain due to non-termination within the token or time limit. 
We find this to be caused by generation loops, entered when generation is amended by constraints and the LLM is unable to recover. An example is depicted in Figure 10, where CodeLlama 34B tries to access the invalid member sort on an expression of type number. Future work may add additional constraints to force stopping such unconstructive loops and steer the model more strictly, e.g., by limiting the complexity of generated expressions.", + "bbox": [ + 86, + 661, + 909, + 779 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "7 Related Work", + "text_level": 1, + "bbox": [ + 88, + 793, + 257, + 808 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Code Language Models. Recently, LLMs have gained traction for diverse coding tasks such as code synthesis, repair, or translation [35]. These models are typically trained on datasets containing billions to trillions of tokens and have billions of parameters, with both factors contributing to improved performance in code-related benchmarks [28, 46, 59, 64]. Meanwhile, LLMs are well known to frequently make mistakes [32, 58], and, as we show in this work, even state-of-the-art open-weight models with over 30 billion parameters frequently make errors in code generation.", + "bbox": [ + 86, + 813, + 909, + 915 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "171:21", + "bbox": [ + 862, + 83, + 905, + 95 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Improving Language Model Accuracy. Apart from constrained decoding, three primary approaches have been proposed to enhance the accuracy of language models on code tasks: fine-tuning, retrieval augmentation (RAG), and compiler or execution feedback. Fine-tuning adapts the model weights based on specifically collected training data. 
This process is highly resource intensive [65, 70]. RAG provides the model with additional context based on a database or related code snippets [6, 57]. Compiler and execution feedback is only available after completing the model generation and requires resampling [16, 34, 69]. However, constrained decoding is orthogonal to these methods and, as indicated by Poesia et al. [57] and our experimental results, combining constrained decoding with RAG or compiler feedback additionally improves model performance.", + "bbox": [ + 90, + 116, + 907, + 266 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Constrained Decoding. Prior work on constrained decoding failed to achieve strong results due to its limitation to syntactic language features. Constraining to context-free languages has been explored extensively in recent work [7, 8, 57, 71]. Simple context-sensitive syntactic features, such as the space indentation in Python and the scope markers in Go have also been implemented [41, 66]. As demonstrated in §5, however, syntax errors on average account for only $6\\%$ of compilation errors in recent code models. The rarity of syntax errors significantly reduces the potential of leveraging them for improvements in code correctness. Meanwhile, our type-constrained decoding more than halved compilation errors.", + "bbox": [ + 90, + 275, + 907, + 408 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Type Systems for Code Synthesis. Previous work that leveraged type systems for code synthesis was confined to specialized settings and unable to constrain general, complex program generation. Poesia et al. [57] proposed using known column names to guide SQL query generation. Gvero et al. [30] employed a search on the type graph for function call completion. Agrawal et al. [1] leverage language-server-generated type annotations for object member accesses. Blinn et al. 
[11] use language-server-derived type information to provide additional context to the LLM, but not to enforce hard constraints. Additionally, type constraints have been used to direct code synthesis based on specialized search procedures [22, 56, 69]. However, these methods are not compatible with LLM-based code generation. This limits their ability to exploit the powerful natural language and general-purpose capabilities of LLMs.", + "bbox": [ + 90, + 416, + 907, + 581 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "8 Conclusion", + "text_level": 1, + "bbox": [ + 90, + 595, + 228, + 611 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this work, we explored how type systems in programming languages can be used to guide language models during decoding. Concretely, we design and implement prefix automata to perform type constraining for a foundational simply typed language and then extend it to the popular language TypeScript. We extensively evaluate the impact of using such constraints for code synthesis, translation, and repair and observe that we more than halve compilation errors on a diverse set of models and consistently increase functional correctness. We further explore qualitatively how the constraining positively impacts code generation. 
We conclude that such type constraining should be implemented for more programming languages, and has the potential to generally improve code generation in many domains.", + "bbox": [ + 90, + 616, + 907, + 765 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "171:22", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Artifact Availability", + "text_level": 1, + "bbox": [ + 88, + 118, + 279, + 136 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The artifact for this paper, including source code, datasets, and reproductions scripts, is available on GitHub (https://github.com/eth-sri/type-constrained-code-generation) and Zenodo [45].", + "bbox": [ + 86, + 138, + 907, + 173 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 88, + 187, + 279, + 204 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We would like to thank the anonymous reviewers for their in-depth and constructive feedback, and the artifact reviewers for their feedback on our artifact accessibility.", + "bbox": [ + 86, + 206, + 907, + 241 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 92, + 256, + 197, + 272 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Lakshya Agrawal, Aditya Kanade, Navin Goyal, Shuvendu K Lahiri, and Sriram Rajamani. 2023. Monitor-Guided Decoding of Code LMs with Static Analysis of Repository Context. In NeurIPS. https://openreview.net/forum?id=qPUbKxKvXq", + "[2] Anthropic. [n.d.]. Claude 3 Model Card. https://assets.anthropic.com/m/61e7d27f8c8f5919/original/Claude-3-ModelCard.pdf Accessed: March 10, 2025.", + "[3] Anthropic. 2025. JSON Mode. 
https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode Accessed: March 10, 2025.", + "[4] Ken Arnold and James Gosling. 1996. The Java Programming Language.", + "[5] Jacob Austin, Augustus Odena, Maxwell I. Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie J. Cai, Michael Terry, Quoc V. Le, et al. 2021. Program Synthesis with Large Language Models. arXiv Preprint (2021). https://arxiv.org/abs/2108.07732", + "[6] Nastaran Bassamzadeh and Chhaya Methani. 2024. A Comparative Study of DSL Code Generation: Fine-Tuning vs. Optimized Retrieval Augmentation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2407.02742", + "[7] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2023. Prompting Is Programming: A Query Language for Large Language Models. PLDI (2023). https://doi.org/10.1145/3591300", + "[8] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2024. Guiding LLMs The Right Way: Fast, Non-Invasive Constrained Generation. In ICML. https://openreview.net/forum?id=pXaEYzrFae", + "[9] Satwik Bhattachamishra, Kabir Ahuja, and Navin Goyal. 2020. On the Ability and Limitations of Transformers to Recognize Formal Languages. In EMNLP. https://doi.org/10.18653/v1/2020.emnlp-main.576", + "[10] Gavin M. Bierman, Martin Abadi, and Mads Torgersen. 2014. Understanding TypeScript. In ECOOP.", + "[11] Andrew Blinn, Xiang Li, June Hyung Kim, and Cyrus Omar. 2024. Statically Contextualizing Large Language Models with Typed Holes. OOPSLA (2024). https://doi.org/10.1145/3689728", + "[12] Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language Models are Few-Shot Learners. In NeurIPS. 
https://proceedings.neurips.cc/paper/2020/bash/1457c0d6bfcb4967418bf8ac142f64a-Abstract.html", + "[13] Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q. Feldman, et al. 2023. MultiPL-E: A Scalable and Polyglot Approach to Benchmarking Neural Code Generation. IEEE Trans. Software Eng. (2023).", + "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Pondé de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating Large Language Models Trained on Code. arXiv Preprint (2021). https://arxiv.org/abs/2107.03374", + "[15] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, et al. 2025. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.12948", + "[16] Pantazis Deligiannis, Akash Lal, Nikita Mehrotra, Rishi Poddar, and Aseem Rastogi. 2025. RustAssistant: Using LLMs to Fix Compilation Errors in Rust Code. In ICSE. https://www.microsoft.com/en-us/research/publication/rustassistant-using-llms-to-fix-compiler-errors-in-rust-code/", + "[17] TypeScript Developers. [n.d.]. TypeScript: Documentation – More on Functions. https://www.typescriptlang.org/docs/handbook/2/functions.html#function-type-expressions Accessed: March 10, 2025.", + "[18] Yixin Dong, Charlie F. Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. 2024. XGrammar: Flexible and Efficient Structured Generation Engine for Large Language Models. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2411.15100", + "[19] Alan AA Donovan and Brian W Kernighan. 2015. The Go programming language.", + "[20] Shihan Dou, Haoxiang Jia, Shenxi Wu, Huiyuan Zheng, Weikang Zhou, Muling Wu, Mingxu Chai, Jessica Fan, Caishuang Huang, Yunbo Tao, et al. 2024. 
What's Wrong with Your Code Generated by Large Language Models? An Extensive Study. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2407.06153" + ], + "bbox": [ + 90, + 276, + 907, + 913 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "171:23", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[21] Javid Ebrahimi, Dhruv Gelda, and Wei Zhang. 2020. How Can Self-Attention Networks Recognize Dyck-n Languages?. In EMNLP. https://aclanthology.org/2020-findings-emnlp.384/", + "[22] Jonás Fiala, Shachar Itzhaky, Peter Müller, Nadia Polikarpova, and Ilya Sergey. 2023. Leveraging Rust Types for Program Synthesis. PLDI (2023). https://doi.org/10.1145/3591278", + "[23] Zheng Gao, Christian Bird, and Earl T. Barr. 2017. To type or not to type: quantifying detectable bugs in JavaScript. In ICSE. https://doi.org/10.1109/ICSE.2017.75", + "[24] Alessandro Giagnorio, Alberto Martin-Lopez, and Gabriele Bavota. 2025. Enhancing Code Generation for Low-Resource Languages: No Silver Bullet. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.19085", + "[25] GitHub. [n.d.]. https://github.com/features/copilot", + "[26] GitHub. 2022. The top programming languages. https://octoverse.github.com/2022/top-programming-languages", + "[27] Aaron Grattaflori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The Llama 3 Herd of Models. ArXiv Preprint (2024). https://arxiv.org/abs/2407.21783", + "[28] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Y. Wu, Y. K. Li, et al. 2024. DeepSeek-Coder: When the Large Language Model Meets Programming - The Rise of Code Intelligence. 
arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2401.14196", + "[29] Gusanidas. [n.d.]. Compilation Benchmark. https://github.com/Gusanidas/compilation-benchmark Accessed: March 10, 2025.", + "[30] Tihomir Gvero, Viktor Kuncak, Ivan Kuraj, and Ruzica Piskac. 2013. Complete completion using types and weights. In PLDI. https://doi.org/10.1145/2491956.2462192", + "[31] John E. Hopcroft and Jeffrey D. Ullman. 1979. Introduction to Automata Theory, Languages and Computation.", + "[32] Lei Huang, Wejiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2023. A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2311.05232", + "[33] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. OpenAI o1 System Card. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.16720", + "[34] Prithwish Jana, Piyush Jha, Haoyang Ju, Gautham Kishore, Aryan Mahajan, and Vijay Ganesh. 2024. CoTran: An LLM-Based Code Translator Using Reinforcement Learning with Feedback from Compiler and Symbolic Execution. In ECAI (Frontiers in Artificial Intelligence and Applications). https://doi.org/10.3233/FAIA240968", + "[35] Juyong Jiang, Fan Wang, Jiasi Shen, Sungju Kim, and Sunghun Kim. 2024. A Survey on Large Language Models for Code Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.00515", + "[36] Sathvik Joel, Jie JW Wu, and Fatemeh H. Fard. 2024. Survey on Code Generation for Low resource and Domain Specific Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2410.03981", + "[37] Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi, Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, et al. 2024. 
StarCoder 2 and The Stack v2: The Next Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2402.19173", + "[38] Madnight. 2024. GitHub 2.0. https://madnight.github.io/git/#/pull_requestes/2024/1", + "[39] Harry G. Mairson. 2004. Linear lambda calculus and PTIME-completeness. J. Funct. Program. (2004). https://doi.org/10.1017/S0956796804005131", + "[40] Nicholas D Matsakis and Felix S Klock. 2014. The rust language. ACM SIGAda Ada Letters (2014).", + "[41] Daniel Melcer, Nathan Fulton, Sanjay Krishna Gouda, and Haifeng Qian. 2024. Constrained Decoding for Fill-in-the-Middle Code Language Models via Efficient Left and Right Quotienting of Context-Sensitive Grammars. (2024). https://arxiv.org/abs/2402.17988", + "[42] Microsoft. 2024. TypeScript. https://github.com/microsoft/TypeScript. Accessed on November 9, 2024, commit #ef802b1.", + "[43] John C. MITCHELL. 1990. Type Systems for Programming Languages. In Formal Models and Semantics. https://www.sciencedirect.com/science/article/pii/B9780444880741500135", + "[44] Niklas Muennighoff, Qian Liu, Armel Randy Zebaze, Qinkai Zheng, Binyuan Hui, Terry Yue Zhuo, Swayam Singh, Xiangru Tang, Leandro von Werra, and Shayne Longpre. 2024. OctoPack: Instruction Tuning Code Large Language Models. In ICLR. https://openreview.net/forum?id=mw1PWNSWZP", + "[45] Niels Mündler, Jingxuan He, Hao Wang, Koushik Sen, Dawn Song, and Martin Vechev. 2025. Reproduction Package for \"Type-Constrained Code Generation with Language Models\". doi:10.5281/zenodo.15355889", + "[46] Niels Mündler, Mark Niklas Müller, Jingxuan He, and Martin Vechev. 2024. SWT-Bench: Testing and Validating Real-World Bug-Fixes with Code Agents. In NeurIPS. 
http://papers.nips.cc/paper_files/paper/2024/bit/94f093b41fc2666376fb1f667fe282f3-AbsAbstract-Conference.html" + ], + "bbox": [ + 90, + 119, + 907, + 894 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "171:24", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 907, + 95 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] nielstron. 2024. Incorrect type deducted for accumulator in reduce. https://github.com/microsoft/TypeScript/issues/59999.", + "[48] nop33. 2024. Wrong inferred initial value in reduce. https://github.com/microsoft/TypeScript/issues/59863.", + "[49] OpenAI. 2023. GPT-4 Technical Report. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2303.08774", + "[50] OpenAI. 2025. Structured Outputs. https://platform.openai.com/docs/guides/structured-outputs Accessed: March 10, 2025.", + "[51] Gabriel Orlanski, Kefan Xiao, Xavier Garcia, Jeffrey Hui, Joshua Howland, Jonathan Malmaud, Jacob Austin, Rishabh Singh, and Michele Catasta. 2023. Measuring the Impact of Programming Language Distribution. In ICML. https://proceedings.mlr.press/v202/orlanski23a.html", + "[52] oxc project. 2024. oxc - The Javascript Oxidation Compiler. https://github.com/oxc-project/oxc.", + "[53] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226", + "[54] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. 
Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226", + "[55] Hammond Pearce, Baleegh Ahmad, Benjamin Tan, Brendan Dolan-Gavitt, and Ramesh Karri. 2022. Asleep at the Keyboard? Assessing the Security of GitHub Copilot's Code Contributions. In S&P. https://doi.org/10.1109/SP46214.2022.9833571", + "[56] Daniel Perelman, Sumit Gulwani, Thomas Ball, and Dan Grossman. 2012. Type-directed completion of partial expressions. In PLDI. https://doi.org/10.1145/2254064.2254098", + "[57] Gabriel Poesia, Alex Polozov, Vu Le, Ashish Tiwari, Gustavo Soares, Christopher Meek, and Sumit Gulwani. 2022. Synchronesh: Reliable Code Generation from Pre-trained Language Models. In ICLR. https://openreview.net/forum?id=KmtVD97J43e", + "[58] Vipula Rawte, Amit P. Sheth, and Amitava Das. 2023. A Survey of Hallucination in Large Foundation Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2309.05922", + "[59] Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, et al. 2023. Code Llama: Open Foundation Models for Code. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.12950", + "[60] Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In ACL. https://doi.org/10.18653/v1/p16-1162", + "[61] Manish Shetty, Naman Jain, Adwait Godbole, Sanjit A. Seshia, and Koushik Sen. 2024. Syzygy: Dual Code-Test C to (safe) Rust Translation using LLMs and Dynamic Analysis. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.14234", + "[62] Vince Szabo, Dominik Winterer, and Zhendong Su. 2024. Compilation Quotient (CQ): A Metric for the Compilation Hardness of Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.04778", + "[63] Florian Tambon, Arghavan Moradi Dakhel, Amin Nikanjam, Foutse Khomh, Michel C. 
Desmarais, and Giuliano Antoniol. 2025. Bugs in large language models generated code: an empirical study. Empir. Softw. Eng. (2025). https://doi.org/10.1007/s10664-025-10614-4", + "[64] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. 2024. Gemma 2: Improving Open Language Models at a Practical Size. arXiv Preprint (2024). https://arxiv.org/abs/2408.00118", + "[65] Yun-Da Tsai, Mingjie Liu, and Haoxing Ren. 2024. Code Less, Align More: Efficient LLM Fine-tuning for Code Generation with Data Pruning. (2024). https://doi.org/10.48550/arXiv.2407.05040", + "[66] Shubham Ugare, Tarun Suresh, Hangoo Kang, Sasa Misailovic, and Gagandeep Singh. 2024. SynCode: LLM Generation with Grammar Augmentation. ArXiv Preprint (2024). https://arxiv.org/abs/2403.01632", + "[67] Pawel Urzyczyn. 1997. Inhabitation in Typed Lambda-Calculi (A Syntactic Approach). In TLCA (Lecture Notes in Computer Science). https://doi.org/10.1007/3-540-62688-3_47", + "[68] Heidi Vella. 2024. Google turns to AI to write new code; Workforce reduced. https://aibusiness.com/data/google-turns-to-ai-to-write-new-code-workforce-reduced", + "[69] Yuxiang Wei, Chunqiu Steven Xia, and Lingming Zhang. 2023. Copiloting the Copilots: Fusing Large Language Models with Completion Engines for Automated Program Repair. In ESEC/FSE. https://doi.org/10.1145/3611643.3616271", + "[70] Martin Weyssow, Xin Zhou, Kisub Kim, David Lo, and Houari A. Sahraoui. 2023. Exploring Parameter-Efficient Fine-Tuning Techniques for Code Generation with Large Language Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.10462", + "[71] Brandon T. Willard and Rémi Louf. 2023. Efficient Guided Generation for Large Language Models. arXiv Preprint (2023). 
https://doi.org/10.48550/arXiv.2307.09702" + ], + "bbox": [ + 90, + 119, + 907, + 895 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 83, + 495, + 97 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "171:25", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[72] Andy Yang, David Chiang, and Dana Angluin. 2024. Masked Hard-Attention Transformers Recognize Exactly the Star-Free Languages. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/hash/13d7f172259b11b230cc5da8768abc5f-Abstract-Conference.html", + "[73] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 Technical Report. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.15115", + "[74] Quanjun Zhang, Chunrong Fang, Yang Xie, Yuxiang Ma, Weisong Sun, Yun Yang, and Zhenyu Chen. 2024. A Systematic Literature Review on Large Language Models for Automated Program Repair. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2405.01466" + ], + "bbox": [ + 88, + 119, + 911, + 231 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A Detailed Prefix Automaton Definitions", + "text_level": 1, + "bbox": [ + 88, + 248, + 491, + 263 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In this section, we provide more detailed definitions and analysis of the various automata for $L_{B}$ .", + "bbox": [ + 86, + 268, + 903, + 287 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "A.1 Base Automata", + "text_level": 1, + "bbox": [ + 88, + 300, + 286, + 313 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We now provide detailed definitions for the base prefix automata introduced at the end of §3.2: union, concatenation, Kleene-Star, and terminal.", + "bbox": [ + 86, + 319, + 909, + 354 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Union. For the union $A_X \\cup A_Y$ , we define the resulting sets of initial states and accepting states as $I \\coloneqq I_X \\cup I_Y$ and $F \\coloneqq F_X \\cup F_Y$ , respectively. The transition function is defined as follows:", + "bbox": [ + 86, + 361, + 907, + 395 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\delta (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 360, + 401, + 631, + 445 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To show that the language parsed by this automaton is indeed the union $L(A_{X} \\cup A_{Y}) = L(A_{X}) \\cup L(A_{Y})$ , we employ a short helper lemma, which can be shown inductively.", + "bbox": [ + 86, + 450, + 907, + 486 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "LEMMA 7. 
The set of the reachable states from a set of states $\\mathbf{q}$ is equal to the union of reachable states from each state in $\\mathbf{q}$ , i.e., $\\gamma (\\mathbf{q},s) = \\bigcup_{q\\in \\mathbf{q}}\\gamma (q,s)$ .", + "bbox": [ + 88, + 493, + 909, + 531 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Since the states are distinct and we merely combine the transition functions of both automata, using the lemma, we can quickly see that the language parsed is indeed the union. Moreover, if both $A_{X}$ and $A_{Y}$ are prefix automata, this also holds for $A_{X} \\cup A_{Y}$ .", + "bbox": [ + 86, + 536, + 909, + 586 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Concatenation. For the concatenation automaton $A_{XY}$ , we define $I \\coloneqq I_X$ , $F \\coloneqq F_Y$ , and the transition function as follows:", + "bbox": [ + 86, + 595, + 907, + 627 + ], + "page_idx": 25 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {X Y} (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\backslash F _ {X} \\\\ \\delta_ {X} (q, c) \\cup \\delta_ {Y} (I _ {Y}, c) & \\text {i f} q \\in F _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 286, + 631, + 707, + 694 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Informally, concatenation preserves the parsing behavior of both $A_{X}$ and $A_{Y}$ in their respective states. When $A_{XY}$ reaches an accepting state of $A_{X}$ and receives another input character, it either remains in $A_{X}$ or transitions to $A_{Y}$ , as defined in the second case of $\\delta_{XY}$ . 
Essentially, this maintains outgoing edges from accepting states in $A_{X}$ while adding edges from these accepting states to initial states of $A_{Y}$ .", + "bbox": [ + 86, + 698, + 907, + 781 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "It follows from a similar argument that $L(A_{XY}) = L(A_X) \\circ L(A_Y)$ , where $L(A_X) \\circ L(A_Y)$ is defined as $\\{s_X \\circ s_Y \\mid s_X \\in L(A_X), s_Y \\in L(A_Y)\\}$ . We first show $L(A_{XY}) \\subseteq L(A_X) \\circ L(A_Y)$ . Due to (P1), we can always split any $s \\in L(A_{XY})$ into $s_X$ that extends from $I_X$ to $F_X$ and $s_Y$ that extends from $I_Y$ to $F_Y$ . Then $s_X \\in L(A_X)$ and $s_Y \\in L(A_Y)$ . For $L(A_X) \\circ L(A_Y) \\subseteq L(A_X \\circ A_Y)$ , we pick any $s_X \\circ s_Y$ from $L(A_X) \\circ L(A_Y)$ and parse it using $A_{XY}$ . We observe that it will first traverse from $I_X$ to $F_X$ consuming $s_X$ , and then transition through $I_Y$ to $F_Y$ by consuming $s_Y$ .", + "bbox": [ + 86, + 781, + 907, + 881 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Moreover $A_{XY}$ is a prefix automaton, if $A_{X}$ and $A_{Y}$ are prefix automata and $L(A_{Y}) \\neq \\emptyset$ . Since $A_{X}$ is a prefix automaton, we can reach $F_{X}$ from any state in $Q_{X}$ . From $F_{X}$ we additionally reach", + "bbox": [ + 86, + 881, + 907, + 916 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "171:26", + "bbox": [ + 90, + 83, + 137, + 95 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 98 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "$I_{Y} \\subseteq Q_{Y}$ . Since $A_{Y}$ is a prefix automaton, we can reach $F_{Y}$ for any state in $Q_{Y}$ . 
This construction is a prefix automaton only if $I_{Y} \neq \emptyset$, which, due to the prefix property, is equivalent to $L(A_{Y}) \neq \emptyset$.", + "bbox": [ + 86, + 116, + 905, + 152 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Kleene-Star. We define the Kleene-Star automaton $A_{\overline{X}}$ that parses indefinite repetitions of words accepted by $X$ . First, we consider all initial states as final states, i.e., we ensure $I_X \subseteq F_{\overline{X}}$ . Then we add transitions to the transition function $\delta_X$ from the final states $F_X$ back to the initial states $I_X$ .", + "bbox": [ + 86, + 159, + 905, + 209 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\delta_ {\overline {{X}}} (q _ {X}, c) := \left\{ \begin{array}{l l} \delta_ {X} (q _ {X}, c) & \text {i f} q _ {X} \not \in F _ {X} \\ \delta_ {X} (q _ {X}, c) \cup \delta (I _ {X}, c) & \text {i f} q _ {X} \in F _ {X}. \end{array} \right.\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 216, + 699, + 261 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We can quickly see that $L(A_{\overline{X}}) = \{\overline{s} \mid s \in L(A_X)\}$ , with the same argument as the concatenation automaton. Additionally, because the initial states are accepting, the empty word (zero repetitions) is in $L(A_{\overline{X}})$ . We similarly see that this is a prefix automaton if $A_{X}$ is a prefix automaton. Note that here $L(A_{X}) \neq \emptyset$ is not required. This is because if $L(A_{X}) = \emptyset$, then $A_{\overline{X}} = A_{X} = A_{\emptyset}$, which is still a prefix automaton.", + "bbox": [ + 86, + 266, + 905, + 350 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Terminals. The terminal automaton $A_{\mathsf{S}}$ parses exactly the terminal S. 
They accept the usual alphabet $\Sigma$ and feature the states $Q \coloneqq \{q_{\mathsf{s}} \mid \mathsf{s} \text{ is a suffix of S}\}$ , $F \coloneqq \{q_{\varepsilon}\}$ , $I \coloneqq \{q_{\mathsf{S}}\}$ . The transition function $\delta$ is defined as follows:", + "bbox": [ + 86, + 359, + 905, + 408 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\delta (q _ {s}, c) := \left\{ \begin{array}{l l} \{q _ {s ^ {\prime}} \} & \text {i f} c \circ s ^ {\prime} = s \\ \varnothing & \text {o t h e r w i s e .} \end{array} \right.\n$$\n", + "text_format": "latex", + "bbox": [ + 358, + 413, + 633, + 456 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Clearly $A_{\mathfrak{S}}$ is a prefix automaton. We can show inductively that for any $s: \gamma(q_{s}, s') = \{q_{\varepsilon}\} \Longleftrightarrow s = s'$ , and thus $L(A_{\mathfrak{S}}) = \{\mathfrak{S}\}$ . With a simple modification, we introduce $A_{\mathfrak{s}}^{W}$ , where $W$ denotes whitespace characters. The transition function is defined as $\delta(q_{\mathfrak{s}}^{W}, c) := \{q_{\mathfrak{s}}^{W}\}$ if $c \in W$ ; otherwise, $\delta(q_{c \circ s}^{W}, c) := \{q_{\mathfrak{s}}^{W}\}$ . This allows arbitrary whitespace before parsing $s$ . This is how we implement syntactic indifference to whitespace between terminals.", + "bbox": [ + 86, + 463, + 905, + 547 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "A.2 Expressions", + "text_level": 1, + "bbox": [ + 88, + 559, + 255, + 577 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Expressions are parsed using recursive automatons as introduced in §3.4. In this part of the Appendix, we describe in more detail how information is passed between states.", + "bbox": [ + 86, + 581, + 905, + 613 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Notation. 
In the following, we will implicitly assume that $\\delta(q, c) = \\emptyset$ if not explicitly defined otherwise, making notation more concise. For any state, we access the following information through dot notation or the special notation on the state, which we assume is passed to subsequent states through the transition function (unless otherwise stated). This information is alternatively passed through to entire automata in composite automata, e.g., in $A_{XY}$ from $A_X$ to $A_Y$ .", + "bbox": [ + 86, + 623, + 905, + 706 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $q \\in F_X$ : Whether state $q$ is an accepting state of the automaton $A_X$ .", + "- $q. \\Gamma$ : The type environment based on state $q$ currently being parsed.", + "- $q$ .LHS: The left-hand side expression of an extending expression represented by state $q$ , i.e., when extending $X$ with $Y$ and currently parsing $q_{Y}$ , then $q_{Y}$ .LHS = $X$ .", + "- $q$ .TYP: The described type of the last coherent expression that this state belongs to. This is only defined for accepting states. Generally, we ensure that when some expression $e$ was parsed, the corresponding state $q_{e}$ has attribute $q_{e}$ .TYP such that $q_{e} \\Gamma \\vdash e : q_{e}$ .TYP.", + "- $q \\downarrow T$ : Type $T$ to which state $q$ is constrained." + ], + "bbox": [ + 106, + 711, + 905, + 843 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "When accessing the properties of $A$ , we access the property of the current state of the automaton $q \\in Q$ , e.g., $A. \\mathrm{LHS} = q. \\mathrm{LHS}$ . For parsed automata, the current state is the final, accepting state. The TYP attribute expresses the type of the expression parsed so far. In expression states $q$ , we leverage the LHS to accurately determine $q. 
\\mathrm{TYP}$ .", + "bbox": [ + 86, + 848, + 905, + 915 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 95 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "171:27", + "bbox": [ + 862, + 83, + 905, + 95 + ], + "page_idx": 26 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} q _ {\\mathrm {S T R . T Y P}} := \\text {s t r i n g} \\quad q _ {(e) . \\mathrm {T Y P}} := A _ {e}. \\mathrm {T Y P} \\\\ q _ {\\text {N U M}}. \\text {T Y P} := \\text {n u m b e r} \\quad q _ {\\odot e}. \\text {T Y P} := R, \\text {f o r} q _ {\\odot e}. \\text {L H S}. \\text {T Y P} = S, A _ {e}. \\text {T Y P} = T \\text {a n d} S \\odot T: R \\\\ q _ {\\text {B O O L . T Y P}} := \\text {b o o l e a n} \\quad q _ {(\\overline {{e}}). \\text {T Y P}} := T, \\text {f o r} q _ {(\\overline {{e}}). \\text {L H S . T Y P}} = (\\overline {{p}}) \\Rightarrow T \\\\ q _ {x. \\mathrm {T Y P}} := T \\text {w h e r e} q _ {x}. \\Gamma \\vdash x: T \\quad q. n. \\mathrm {T Y P} := T, \\text {f o r L O O K U P} (q. n. \\mathrm {L H S . T Y P}, n) = T \\\\ q _ {(\\overline {{p}}) = > e. T Y P} := \\left(A _ {\\overline {{p}}} ^ {-}. T Y P\\right) = > A _ {e}. T Y P \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 111, + 116, + 897, + 202 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Unrestricted Expressions. The left-hand side of the currently parsed expression is used in the definition of automata for three extending expressions; arithmetic operators, function call, and member access. 
The arithmetic operator automaton constrains its states to those with valid operators, i.e.:", + "bbox": [ + 86, + 209, + 909, + 275 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nA_{\odot e}:= \bigcup_{\exists R:A_{\odot e}.LHS.TYP\odot T = R}A_{\odot}\circ (A_{e}\downarrow T).\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 277, + 658, + 313 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For function call, the automaton is only valid if the left-hand side is a function, and accepts only the valid signature.", + "bbox": [ + 86, + 316, + 907, + 350 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nA _ {(\overline {{e}})} := \left\{ \begin{array}{l l} A _ {(} \circ (A _ {\overline {{e}}} \downarrow A _ {\overline {{p}}}. \mathrm {T Y P}) \circ A _ {)} & \text {i f} A _ {(\overline {{e}}). \mathrm {L H S . T Y P}} = (\overline {{p}}) \Rightarrow T \\ A _ {\emptyset} & \text {o t h e r w i s e .} \end{array} \right.\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 356, + 761, + 400 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Finally, the member access automaton is a union of the automata that parses the attributes of the left-hand side expression. Or formally,", + "bbox": [ + 86, + 406, + 907, + 440 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\nA_{\cdot n}:= \bigcup_{\exists T:\text{LOOKUP}(A_{\cdot n}.LHS.TYP, m) = T}A_{\cdot \mathfrak{m}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 345, + 445, + 648, + 484 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Type-Restricted Expressions. The type-restricted versions of the automata are covered by the definitions presented in §3.4. 
We therefore do not separately list them here.", + "bbox": [ + 86, + 491, + 907, + 525 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "A.3 Pruning the Type Search", + "text_level": 1, + "bbox": [ + 86, + 538, + 376, + 554 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We now present our heuristic for pruning the type search recursion from the prefix automaton for type-constrained expressions in §3.4, i.e., our implementation of PRUNESEARCH at Line 6 of Algorithm 2. The heuristic is based on the complexity and novelty of candidate types to explore.", + "bbox": [ + 86, + 558, + 909, + 606 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Based on the assumptions about the lookup function and the extension expressions in §3.1, we observe a restriction in the reachable types by extensions: from any given type, we reach itself, result types of arithmetic operators via op, return types through CALL, and member types through MEMBER. A higher-order type $(\\cdot) \\Rightarrow T$ does not allow access to types not reachable from $T$ . Consequently, we avoid exploring such higher-order types unless the target type is of higher order, or the higher-order type offers novel, yet unexplored types. For instance, in Figure 11, the type $(\\cdot) \\Rightarrow$ number is not explored, because it is more complex than both the initial and goal types, number and string, and does not contain any unexplored type. Meanwhile, $(\\cdot) \\Rightarrow$ string is explored, as it contains a novel string type.", + "bbox": [ + 86, + 608, + 909, + 758 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To formalize this understanding, we introduce the concepts about the depth and root types of a given type, denoted as $\\text{DEPTH}$ and $\\text{ROOT}$ , respectively. 
$\\text{DEPTH}$ measures the complexity of a type, specifically the order of a function, while $\\text{ROOT}$ returns all types of minimal depth (e.g., string, number, and boolean) that constitute a higher-order type. They are defined as follows:", + "bbox": [ + 86, + 758, + 909, + 825 + ], + "page_idx": 27 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {D E P T H} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {D E P T H} (S) + 1 & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right. \\qquad \\operatorname {R O O T} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {R O O T} (S) & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ \\{T \\} & \\text {o t h e r w i s e .} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 830, + 890, + 875 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "We leverage DEPTH and ROOT to implement PRUNESEARCH $(T,G,S)$ for a current type $T$ , a goal type $G$ , and a type $S$ after an extension is applied on $T$ . In general, if $G$ is not directly accessible", + "bbox": [ + 86, + 881, + 907, + 915 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "171:28", + "bbox": [ + 90, + 83, + 137, + 95 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 97 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg", + "image_caption": [ + "Figure 11. An example search through the graph for type reachability, starting from $T =$ number with the goal string, e.g., after parsing let x : string; $x = 1$ . States and edges along the final path are marked in green and explored nodes in blue. The () => number node is not explored, as complex types are avoided by our heuristic. 
The node () => string is explored as it enables reaching new type string." + ], + "image_footnote": [], + "bbox": [ + 248, + 113, + 744, + 241 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "from $T$ , it will also not be accessible from expressions with the same root types but greater depth, such as $() \Rightarrow T$ . When $G$ is of higher order, exploring up to the depth of $G$ can be required, such as when $G = () \Rightarrow (() \Rightarrow \text{number})$ . Based on these two ideas, we stop exploring $S$ when $\text{DEPTH}(S) > \max(\text{DEPTH}(G), \text{DEPTH}(T))$ .", + "bbox": [ + 86, + 356, + 907, + 425 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Further, if a higher-depth function returns an unexplored type, we need to explore it. Sticking to the example in Figure 11, type number has the member toString of type () => string. The type string can only be reached by exploring the member access at depth 1. On the contrary, we do not explore a higher-depth function if it does not introduce novel types other than those explored. To achieve this, we adapt Algorithm 2 to additionally define a set of root types $R$ , which is initialized to an empty set and is updated by $R := R \cup \mathrm{root}(T)$ . We do not explore $S$ if $\mathrm{root}(S) \subseteq R$ .", + "bbox": [ + 86, + 425, + 909, + 525 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Taking the conjunction of the aforementioned two aspects, our pruning heuristic is implemented as PRUNESEARCH $(T,G,S) \coloneqq \mathrm{DEPTH}(S) > \max(\mathrm{DEPTH}(T), \mathrm{DEPTH}(G)) \wedge \mathrm{ROOT}(S) \subseteq R$ . The restrictions based on depth and root types are based on the results of the rigorously analyzed search over succinct types by Gvero et al. [30]. This provides a robust heuristic for exploring as many relevant inhabitable types as possible. 
However, due to the additional complexity introduced by the lookup function, we can not guarantee completeness and instead refer to the strong empirical results in our evaluation in §5 as evidence of the search's high coverage.", + "bbox": [ + 86, + 525, + 909, + 641 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "A.4 Implementation of DERIVABLE", + "text_level": 1, + "bbox": [ + 88, + 658, + 419, + 675 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Recall that in Table 1, DERIVABLE for function expressions are defined as: $\\mathrm{DERIVABLE}(q_{(\\overline{p})\\Rightarrow e})\\coloneqq \\{(\\overline{p})\\Rightarrow T\\mid \\mathrm{REACHABLE}(\\mathrm{DERIVABLE}(q_e),T)\\}$ . This involves constructing a type reachability graph and collecting all types $T$ reachable from DERIVABLE $(q_{e})$ . However, this process is intractable because $T$ can be of arbitrarily high-order, as such there are infinitely many $T$ to explore. A similar issue exists for grouped expressions, as their DERIVABLE function is also defined to enumerate reachable types. We introduce two optimization heuristics to address this problem.", + "bbox": [ + 86, + 679, + 907, + 779 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "We first observe that DERIVABLE is always called within the context of an invocation of REACHABLE with target type $G$ , e.g., REACHABLE(DERIVABLE(q(\\overline{p}) => e), G) for function expressions. To compute DERIVABLE(q(\\overline{p}) => e), we enumerate all types present on the type graph represented by REACHABLE(DERIVABLE(q_e), G), which is finite due to application of the pruning heuristics in Appendix A.3. In other words, we bound the maximum complexity of considered types $T$ using the pruning heuristic for reachability of target type $G$ . This leads to a sound but potentially incomplete version of DERIVABLE. 
However, since the final goal is to reach $G$ , this heuristic provides a practically useful set of all relevant derivable types.", + "bbox": [ + 86, + 779, + 909, + 913 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "171:29", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Second, we observe that the resulting two-tiered call REACHABLE( DERIVABLE $(q_{(\\overline{p})} \\Rightarrow e)$ , $G$ ) can be integrated into a single call to further reduce the amount of explored types. Concretely, when discovering some type $M$ in REACHABLE( DERIVABLE $(q_e)$ , $G$ ), as per the previous heuristic, we allow transitioning directly to REACHABLE $(\\overline{p}) \\Rightarrow M, G$ to allow a depth-prioritizing exploration of the search graph. This allows us to efficiently discover a path to $G$ if it exists.", + "bbox": [ + 86, + 118, + 905, + 202 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "A.5 Statements", + "text_level": 1, + "bbox": [ + 88, + 215, + 248, + 230 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "We define the remaining automata to capture the complete language from §3.1. To correctly handle function return types, we pass on related information when entering function bodies:", + "bbox": [ + 86, + 236, + 905, + 269 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $q.R$ : The expected return type of the current state $q$ .", + "- $q.$ RETURNED: Whether the currently parsed program block has returned in all branches.", + "- q.MUSTRETURN: Whether the currently parsed program block must return (i.e., If-Then-Else branches do not need to contain return statements even if a return type is expected of the surrounding code block)." 
+ ], + "bbox": [ + 106, + 275, + 905, + 356 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The single statement automaton is another recursive definition, since some statements, e.g., If-Then-Else, can themselves contain statements. The statement automaton is defined recursively as $A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}}$ . The expression statement automaton and block automaton are simply defined as $A_{\\mathrm{EXPR}} \\coloneqq A_{e}$ ; and $A_{\\mathrm{BLOCK}} \\coloneqq A_{\\{\\overline{s}\\}}$ . The declaration automaton $A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x:T}$ ; captures variable names $x$ using an automaton for non-existing identifiers, which works the same way as $A_{x}$ except that it rejects terminals that match an existing variable. This automaton is a prefix automaton as well, since indefinite additional characters can be added to the variable name and there are only finitely many defined variables. The If-Then-Else automaton is defined using standard concatenation: $A_{\\mathrm{ITE}} \\coloneqq A_{\\mathrm{if}(e) s \\text{else}s}$ . The statements automaton $A_{\\overline{s}}$ , based on the Kleene-Star automaton definition and the single statement automaton. Return statements are only non-empty when the expected return type is set, i.e. when parsing inside a function:", + "bbox": [ + 86, + 362, + 907, + 545 + ], + "page_idx": 29 + }, + { + "type": "equation", + "text": "\n$$\nA _ {\\mathrm {R E T}} := \\left\\{ \\begin{array}{l l} A _ {\\mathrm {r e t u r n}} \\circ A _ {e} \\downarrow T & \\text {i f} A _ {\\mathrm {R E T}}. 
R = T \\\\ A _ {\\emptyset} & \\text {o t h e r w i s e .} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 552, + 676, + 597 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For functions, the automaton is based on the standard concatenation $A_{\\text{FUN}} \\coloneqq A_{\\text{function } x(\\overline{p}):T(\\overline{s})}$ . However, the transition function updates the states of the statement automata inside the function:", + "bbox": [ + 86, + 605, + 909, + 637 + ], + "page_idx": 29 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $q.R \\coloneqq T$ , i.e., the return type of these statements is set to the return type of the function. This value is propagated recursively to all sub-automata.", + "- $q$ .MUSTRETURN := true, for the outermost statement block automaton. It is set to false for deeper nested statement blocks and as soon as a parsed statement $X$ has $q_{X}$ .RETURNED set to true - i.e. one of the main body statements returned in every branch.", + "- $q. \\text{RETURNED} :=$ false, per default in every statement, except a) in return automata, b) inside a multi-statement automaton where the previous statement has RETURNED = true and c) in ITE-automata where both branching statements have RETURNED = true." + ], + "bbox": [ + 106, + 641, + 905, + 775 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "As long as a state $q$ in a multi-statement automaton has $X. \\text{RETURNED} = \\text{false}$ and $q. \\text{MUSTRETURN} = \\text{true}$ , it can not accept but instead forces the generation of another statement. 
Since we can always express the requested type through literals and can always generate a return statement to fulfill this requirement, the prefix automaton property is not violated.", + "bbox": [ + 86, + 780, + 921, + 847 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "B Details about Experimental Evaluation", + "text_level": 1, + "bbox": [ + 88, + 859, + 491, + 876 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In this section, we detail how executable code is extracted from the model responses and a slight modification to the decoding algorithm used, that increases throughput heuristically.", + "bbox": [ + 86, + 881, + 905, + 915 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "171:30", + "bbox": [ + 90, + 83, + 137, + 95 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 95 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Implementation Details. We have two main external dependencies. To implement the regular-expression-based literal automata, we leverage the regex library, as it allows checking if the current string can be completed to match a regular expression. To implement LLM inference, we leverage the transformers library. We provide an exhaustive list of supported and unsupported features of the TypeScript language in our final implementation in Tables 5 and 6, respectively.", + "bbox": [ + 86, + 116, + 911, + 202 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Hyperparameters. We run the models on A100 NVidia GPUs with 80 GB of VRAM and CUDA version 12.4. We set the sampling temperature to 1. We set seeds to 0 to 4 on the four HumanEval runs and 0 on the one MBPP run, respectively. We limit the completions to 1000 tokens and time out after 300 seconds. 
We compute syntactic correctness using the Oxidation toolchain [52] as the official TypeScript compiler does not clearly distinguish between syntactic and semantic errors.", + "bbox": [ + 86, + 209, + 909, + 294 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Excluded MBPP Instances. We discovered that a number of TypeScript translations in the MultiPL-E dataset [13] contained invalidly generated nested tuples. After reporting them to the developers, they have been resolved in the latest version of MBPP and we include them in our evaluation. Still, we find that the TypeScript translation of a number of MBPP instances contains too broad type annotation, annotating elements as any or array of any. We therefore exclude the following 6 instances from the evaluation:", + "bbox": [ + 86, + 300, + 909, + 401 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- mbpp_405_check_tuplex\n- mbpp_612_merge", + "- mbpp_563extract_values -mbpp_725.extract_quotation", + "- mbpp_580.extract_even\n- mbpp_791_removeNSTed" + ], + "bbox": [ + 150, + 405, + 763, + 452 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Complete Prompts. We provide the complete LLM prompts for our evaluated tasks (synthesis, translation, and repair) in Figures 12-14. The prompts are templates, instantiated with instructions specific to each task and problem instance. If system prompts are not available for a given LLM, we pretend the system prompt to the first user prompt. The model completion starts from a pre-filled function signature, enabling unified unit testing. For the repair prompt, we add the non-compilable model output as assistant output and use a second turn to pass back compiler outputs. Compiler errors contain line numbers for localization, so we annotate the output with line numbers. We find that Qwen2.5 32B tends to always generate test cases, which leads to errors during compilation. 
We therefore append the sentence Do not include test cases in the code. to its prompt.", + "bbox": [ + 86, + 459, + 911, + 609 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Extracting Output Code. Given our prompts, LLMs are expected to output the resulting programs. However, they often produce additional outputs, such as generated test cases and explanations. Now we describe our heuristics for extracting the generated code. We first extract the corresponding TypeScript code block (i.e., ``` typescript`, or do not cut off if the block is not closed. Inside the code block, we cut off after the closing curly brace of the last balanced pair of curly braces, if it is followed by a newline or semicolon. This determines the last statement block generated, and avoids cutting off, e.g., inside a template literal. Again, if no such case is found, we do not prune the output. We demonstrate the operation of our cutoff heuristics in Figure 15.", + "bbox": [ + 86, + 618, + 911, + 752 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "C Case Study Full Outputs", + "text_level": 1, + "bbox": [ + 88, + 762, + 355, + 779 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "In §5.4, we present the shortened versions of three qualitative examples showcasing the effectiveness of our approach. 
In Figures 16-18, we provide the full code outputs of these examples, with detailed descriptions in the respective captions.", + "bbox": [ + 86, + 784, + 909, + 834 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 81, + 495, + 97 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "171:31", + "bbox": [ + 862, + 83, + 905, + 95 + ], + "page_idx": 30 + }, + { + "type": "page_footnote", + "text": "3https://pypi.org/project/regex/", + "bbox": [ + 88, + 886, + 304, + 901 + ], + "page_idx": 30 + }, + { + "type": "page_footnote", + "text": "4 https://huggingface.co/docs/transformers", + "bbox": [ + 88, + 901, + 378, + 913 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/0c4baffe0fc9edc7385e27cc94d0c45ae5838eae71ab014be51ed0f83954a62d.jpg", + "table_caption": [ + "Table 5. Supported TypeScript features." + ], + "table_footnote": [], + "table_body": "
Supported TypeScript FeaturesExamples
Expressions, Statements, Function Declarations(LB as introduced in §3)
Additional Literals: BigInt, Regex, Template Strings10n, /\\d*, 'hello ${user}'
Additional Types: void, null, undefinedvoid, undefined, null
Index Signature Types and Literalslet x: {{y: number}: string} = 1: "hi";
Anonymous Functionsfunction(): bool {return true}
Lambda Functions with and without Function Bodiesx => {return y}, x => y
Ternary and LogicOperators? :, |, &&
Arithmetic and Boolean Operations+, -, **, &, !
Assigning Pre-and Postfix Operators++, --
Arrays[1, 2, 3]
Access and Assignment to Computed Membersx[10] = y[i];
Constructors and "new" Callslet x = new Number(1);
Calls with Optional and Rest Parametersfunction foo(x?: number, y...: string)
Sets and MapsMap<string, number>}()
Parameterized Constructor Callsnew Set<string>}()
Tupleslet x: [int, string] = [1, "hello"];
Optional Chainingx.get("hi").get("world")
Spread Operator[...xs]
Type Assertions"hello" as any
For Loopsfor(int x = 0; i < 10; i++)
For Of Loopsfor(x of xs)
For Of Loops with Tuple Destructuringfor([x, y] of xys)
Do-While and While Loopswhile (true) {...}
Typed and Untyped Variable Declarationslet x: number = 1; let y = 100;
Comments, Multiline Comments// Comment
Returning without Expressionsreturn;
Try-Catch Statements with a Fixed Exception Typetry {...} catch (e) {...}
Throw Statementsthrow new Error("..."
Importing the crypto Libraryrequire("crypto")
Global Scope ObjectsMath, parseInt
Automatic Semicolon Insertion
", + "bbox": [ + 90, + 134, + 903, + 640 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/1f1a04cc743632903100ca0c76f3e776b8f11d465997df71f1a7f2b79743c678.jpg", + "table_caption": [ + "Table 6.Unsupported TypeScript features." + ], + "table_footnote": [], + "table_body": "
Missing FeaturesExamples
General Library Importsrequire("example")
Use of Functions Before Declaration
For In Loopsfor(x in y)
Type Declaration
User-Defined Classes
Declaration and Parameterized Call of General Parameterized Functions
Destructuring Assignment[x, y] = z
Uninitialized, Unannotated Variable Declarationslet x;
Return Type Inference
Literal Types
Enumerables
Symbols
", + "bbox": [ + 92, + 688, + 903, + 904 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "171:32", + "bbox": [ + 92, + 83, + 137, + 95 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 81, + 907, + 97 + ], + "page_idx": 31 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System: \nYou are an expert in TypeScript programming. Solve the given problem by writing solution code in TypeScript. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. \nUser: \nCheck if in given array of numbers, are any two numbers closer to each other than given threshold. \n>>> has_close_elements([1.0, 2.0, 3.0], 0.5) \nfalse \n>>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) \ntrue function \nAssistant: \n``~typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {", + "guess_lang": "typescript", + "bbox": [ + 106, + 119, + 890, + 362 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Figure 12. The full prompt for the synthesis task. Text in green is based on the problem instance, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases.", + "bbox": [ + 88, + 370, + 907, + 423 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "System: \nYou are a helpful and expert programmer in Python and TypeScript. You will be given an input program in Python and your task is to translate this program into TypeScript. You may assume that the input program is correct and that the translation should be semantically equivalent. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. 
\nUser: \nThe following is the source program in Python: \n``python \nfrom typing import List \ndef has_close_elements(numbers: List[float], threshold: float) -> bool: for idx, elem in enumerate(numbers): for idx2, elem2 in enumerate(numbers): if idx != idx2: distance = abs(elem - elem2) if distance < threshold: return True return False \nPlease translate the source program to TypeScript. \nAssistant: \n``'typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {", + "guess_lang": "python", + "bbox": [ + 106, + 437, + 864, + 834 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Figure 13. The full prompt for the translation task. Text in green is based on the problem instance, blue is the original Python canonical solution, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases.", + "bbox": [ + 88, + 844, + 907, + 898 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 83, + 493, + 95 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "171:33", + "bbox": [ + 862, + 83, + 905, + 94 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "System:", + "bbox": [ + 106, + 148, + 164, + 161 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "You are an expert in JavaScript programming. Solve the given problem by writing solution code in JavaScript. When answering, insert the solution code in a `````````````````````````````````````````````````````````````````````````````````````````````````````", + "bbox": [ + 121, + 163, + 864, + 188 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "User:", + "bbox": [ + 106, + 205, + 148, + 215 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Given an array of positive integers x. 
return a sorted array of all elements that hasn't any even digit. \nNote: Returned array should be sorted in increasing order. \nFor example: \n>>unique_digits([15,33,1422,1]) \n[1, 15, 33] \n>>unique_digits([152, 323, 1422, 10]) \n[]", + "guess_lang": "txt", + "bbox": [ + 127, + 218, + 666, + 350 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Assistant:", + "bbox": [ + 106, + 365, + 189, + 376 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "001: function unique_digits(x: number[]) : number[] { \n002: const filteredArray: number[] = []; // initialize empty array \n003: for (let i = 0; i < x.length; i++) { \n004: let strx = \"\" + x[i]; \n005: let filteredStr = strx \n006: .ToString() \n007: .split(\"\"); \n008: .filter((digit) => parseInt(digit) % 2 != 0); \n009: if (filteredStr.length > 0) { \n010: filteredArray.push(...filteredStr); \n011: } \n012: } \n013: return filteredArray.sort((a, b) => a - b); \n014: }", + "guess_lang": "typescript", + "bbox": [ + 125, + 391, + 676, + 631 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "User:", + "bbox": [ + 106, + 652, + 150, + 663 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "This output produced an error: \n../../../.././tmp/tmpaewlckfv.ts(10,26): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'.", + "guess_lang": "txt", + "bbox": [ + 125, + 665, + 831, + 711 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Write the program again, and make sure to fix the error this time.", + "bbox": [ + 121, + 726, + 646, + 738 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Assistant:", + "bbox": [ + 106, + 754, + 189, + 766 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "```\n```script\nfunction unique_digits(x: number[]) : number[] {", + "guess_lang": "typescript", 
+ "bbox": [ + 125, + 768, + 506, + 795 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Figure 14. The full prompt for the repair task, where we simulate a multi-turn interaction. Text in green is based on the problem instance. blue is non-compilable model output from the synthesis step, in this case HumanEval #104 and Gemma 2 2B. red is compiler feedback. Generations from the synthesis step are annotated with line numbers to aid localizing the compiler error.", + "bbox": [ + 86, + 809, + 905, + 881 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "171:34", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 905, + 97 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "```\n```\nfunction next_smallest PALINDrome(num: number): number {\n const numStr = num.toString();\n const reversedNumStr = numStr.splitAGING).reverse().joinAGING);\n let min = Math.min(Number(numStr), Number(reversedNumStr));\n}\nif (min === Number(numStr)) {\n return Number(numStr);\n}\nelse {\n return parseInt(min.toString() + 1);\n}\n}\n}\nconst findNextSmallestPalindrome = next_smallest PALINDrome(12321);\nconsole.logfindNextSmallestPalindrome); // Output: 12322\n}\n**Explanation**\nThe function takes a number as input (^num>) in the given criteria.", + "guess_lang": "txt", + "bbox": [ + 94, + 290, + 767, + 612 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Figure 15. The code generated by Gemma 2 2B for MBPP #0. The cutoff algorithm first extracts the fenced code, last line marked in blue. 
To avoid the usage of the console object from the browser extension of TypeScript in the generated examples, we further cut off after the last code block, marked in red.", + "bbox": [ + 88, + 618, + 911, + 672 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 83, + 493, + 95 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "171:35", + "bbox": [ + 862, + 83, + 907, + 95 + ], + "page_idx": 34 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const char = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includes(char)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }", + "guess_lang": "typescript", + "bbox": [ + 94, + 152, + 553, + 398 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(a) The code generated by the unconstrained Gemma 2 2B when translating a Python solution. The model tries to call the split member of a string object without parameters. 
This is legal in Python but not allowed in TypeScript.", + "bbox": [ + 90, + 402, + 907, + 450 + ], + "page_idx": 35 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const letter = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includesletter)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }", + "guess_lang": "typescript", + "bbox": [ + 94, + 473, + 561, + 759 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "(b) The code generated by Gemma 2 2B with type constraints. The constraints guide the model and help it generate the required argument to split, which determines the characters around which to split the string.", + "bbox": [ + 90, + 766, + 907, + 795 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Figure 16. Comparison between the function generated by unconstrained Gemma 2 2B [64] and the code generated with the guidance of type constraints on the problem select_words in HumanEval [14]. 
This problem requires a function that selects words from a given list that contain a specified number of consonants.", + "bbox": [ + 90, + 818, + 907, + 865 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "171:36", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 905, + 95 + ], + "page_idx": 35 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\n}", + "guess_lang": "typescript", + "bbox": [ + 96, + 287, + 477, + 395 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(a) The code generated by the unconstrained Deepseek Coder 33B Instruct. The function loops over all numbers smaller than $n$ to determine the largest divisor. Even though functionally correct for positive numbers, this function returns undefined when the return statement in the loop is never called (e.g., for negative inputs), violating the type system rules.", + "bbox": [ + 86, + 400, + 907, + 461 + ], + "page_idx": 36 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\nreturn 1; // Return 1 if no such divisor found\n}", + "guess_lang": "typescript", + "bbox": [ + 96, + 484, + 512, + 609 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "(b) The code generated by DeepseekCoder 33B Instruct with type constraints. The system ensures that every path returns a value matching the annotated type. 
This forces the LLM to generate another line after the loop, and the model correctly chooses to generate a fallback return statement.", + "bbox": [ + 86, + 613, + 905, + 659 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Figure 17. Comparison between the function generated by the unconstrained DeepSeekCoder 33B Instruct [28] and the code generated with the guidance of type constraints on the problem largest_divisor in HumanEval [14]. The problem asks to compute the largest divisor of a given number.", + "bbox": [ + 86, + 683, + 907, + 727 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "Type-Constrained Code Generation with Language Models", + "bbox": [ + 88, + 83, + 493, + 95 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "171:37", + "bbox": [ + 862, + 83, + 905, + 94 + ], + "page_idx": 36 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "function intersperse(numbers: number[], delimiter: number): number[] { \nreturn numbers.reduce((acc, curr, index) => { \nacc.push(curr); \nif (index < numbers.length - 1) { \nacc.push(delimeter); \n} \nreturn acc; \n}, []); \n}", + "guess_lang": "typescript", + "bbox": [ + 94, + 220, + 672, + 356 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "(a) The code generated by the unconstrained Gemma 2 9B. The code uses the reduce member of arrays in TypeScript. The reduce member is a generic function that accepts a callback as first argument. This callback has three parameters: an accumulator, a current value and an index. The types of the second and third parameter are fixed to the element type of the callee array and number. The first argument type of the callback function is automatically inferred from the type of the second argument to the reduce function, in this case an empty array. The type of this array is never [], a special type denoting arrays without members. 
Since we can assign this type to the callee type number[], the accumulator parameter obtains type never[]. However, calling push on this type results in a type violation as the pushed value of type number is not compatible with the inferred element type never. This issue is well known in TypeScript and even human developers frequently stumble upon this issue [47, 48].", + "bbox": [ + 88, + 361, + 907, + 513 + ], + "page_idx": 37 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "function intersperse(numbers: number[], delimiter: number): number[] {\nreturn numbers.reduce((acc: number], num: number, index: number) => {\nif (index > 0) {\nacc.push(delimeter);\n}\nacc.push(num);\nreturn acc;\n}, []);\n}", + "guess_lang": "typescript", + "bbox": [ + 94, + 536, + 701, + 673 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "(b) The code generated by Gemma 2 9B [64] with type constraints. We force the model to generate type annotations for the callback function in reduce and thus enable the model to generate the correct type annotation and avoid the issue in the unconstrained code.", + "bbox": [ + 88, + 680, + 907, + 725 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "Figure 18. Comparison between the function generated by the unconstrained Gemma 2 9B [73] and the code generated with the guidance of type constraints on the problem intersperse in HumanEval [14]. 
The task in this problem is to insert a delimiter number between consecutive elements of an input list.", + "bbox": [ + 88, + 748, + 907, + 793 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "171:38", + "bbox": [ + 92, + 84, + 135, + 94 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev", + "bbox": [ + 339, + 83, + 905, + 95 + ], + "page_idx": 37 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_model.json b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bbc7bdea560773c5c33753d243ae166fa702087b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_model.json @@ -0,0 +1,5765 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.031, + 0.189, + 0.077, + 0.672 + ], + "angle": 270, + "content": "arXiv:2504.09246v2 [cs.LG] 8 May 2025" + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.114, + 0.885, + 0.139 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.151, + 0.468, + 0.168 + ], + "angle": 0, + "content": "NIELS MündLER*, ETH Zurich, Switzerland" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.17, + 0.388, + 0.187 + ], + "angle": 0, + "content": "JINGXUAN HE*, UC Berkeley, USA" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.189, + 0.36, + 0.204 + ], + "angle": 0, + "content": "HAO WANG,UC Berkeley,USA" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.206, + 0.382, + 0.222 + ], + "angle": 0, + "content": "KOUSHIK SEN, UC Berkeley, USA" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.224, + 0.374, + 0.24 + ], + "angle": 0, + "content": "DAWN SONG, UC Berkeley, USA" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.242, + 0.465, + 0.258 + ], + "angle": 0, 
+ "content": "MARTIN VECHEV, ETH Zurich, Switzerland" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.267, + 0.908, + 0.48 + ], + "angle": 0, + "content": "Large language models (LLMs) have achieved notable success in code generation. However, they still frequently produce uncompilable output because their next-token inference procedure does not model formal aspects of code. Although constrained decoding is a promising approach to alleviate this issue, it has only been applied to handle either domain-specific languages or syntactic features of general-purpose programming languages. However, LLMs frequently generate code with typing errors, which are beyond the domain of syntax and generally hard to adequately constrain. To address this challenge, we introduce a type-constrained decoding approach that leverages type systems to guide code generation. For this purpose, we develop novel prefix automata and a search over inhabitable types, forming a sound approach to enforce well-typedness on LLM-generated code. We formalize our approach on a foundational simply-typed language and extend it to TypeScript to demonstrate practicality. Our evaluation on the HumanEval and MBPP datasets shows that our approach reduces compilation errors by more than half and significantly increases functional correctness in code synthesis, translation, and repair tasks across LLMs of various sizes and model families, including state-of-the-art open-weight models with more than 30B parameters. The results demonstrate the generality and effectiveness of our approach in constraining LLM code generation with formal rules of type systems." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.487, + 0.909, + 0.518 + ], + "angle": 0, + "content": "CCS Concepts: • Theory of computation → Formal languages and automata theory; • Software and its engineering → General programming languages; • Computing methodologies → Machine learning." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.523, + 0.908, + 0.556 + ], + "angle": 0, + "content": "Additional Key Words and Phrases: Code Generation, Language Model, Type System, Program Synthesis, Program Translation, Program Repair, Constrained Decoding" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.568, + 0.245, + 0.582 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.588, + 0.91, + 0.704 + ], + "angle": 0, + "content": "Large language models (LLMs) are remarkably successful in diverse fields [12, 27, 49] and increasingly used in everyday coding tasks [25, 68]. They show promising capabilities at synthesizing code from natural language descriptions [37, 59], translating between programming languages [59], and repairing incorrect programs [44, 74]. Despite these achievements, LLM-generated code often contains compilation errors, logic flaws, or security vulnerabilities [20, 53, 55]. These issues arise because LLMs generate code by iteratively sampling the next token from a vocabulary of tokens – a probabilistic process that does not provide any formal guarantees." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.705, + 0.909, + 0.772 + ], + "angle": 0, + "content": "A promising technique to address this limitation is constrained decoding, which enforces the formal rules of programming languages during LLMs' code generation process, rejecting invalid tokens and ensuring only valid tokens are considered as generation candidates. Previous studies have shown that constrained decoding improves adherence to program syntax [8, 41, 57, 66]." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.78, + 0.325, + 0.795 + ], + "angle": 0, + "content": "*Both authors co-lead this project." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.805, + 0.909, + 0.861 + ], + "angle": 0, + "content": "Authors' Contact Information: Niels Mündler, niels.muendler@inf.ethz.ch, ETH Zurich, Switzerland; Jingxuan He, jingxuan. he@berkeley.edu, UC Berkeley, USA; Hao Wang, hwang628@berkeley.edu, UC Berkeley, USA; Koushik Sen, ksen@berkeley. edu, UC Berkeley, USA; Dawn Song, dawnsong@berkeley.edu, UC Berkeley, USA; Martin Vechev, martin.vechev@inf.ethz.ch, ETH Zurich, Switzerland." + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.872, + 0.197, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.089, + 0.901, + 0.755, + 0.914 + ], + "angle": 0, + "content": "This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.13, + 0.096 + ], + "angle": 0, + "content": "171:2" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.083, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.119, + 0.911, + 0.169 + ], + "angle": 0, + "content": "However, these improvements are limited, as syntax accounts for only a small part of overall program correctness. For instance, in our evaluation of state-of-the-art open-weight LLMs (§5), syntactic errors make up on average \\(6\\%\\) of all compilation errors in generated TypeScript code." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.18, + 0.911, + 0.28 + ], + "angle": 0, + "content": "Key Challenge: Generating Well-Typed Code. Beyond program syntax, type systems detect and reject bugs at compile time [40, 43] and are therefore enforced in many popular programming languages [4, 10, 19]. We observe that LLMs struggle to generate well-typed code [20, 29, 63], as typing rules significantly complicate the generation of valid code [62]. 
In our evaluation of LLMs (§5), on average \\(94\\%\\) of compilation errors result from failing type checks. This suggests a promising direction: guiding LLMs' code generation process by incorporating the formal rules of type systems." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.28, + 0.911, + 0.431 + ], + "angle": 0, + "content": "However, implementing this approach is challenging because type systems can in general not be captured by context-free grammars [43], prohibiting the application of prior constrained decoding methods developed for program syntax [8, 66]. Furthermore, besides deriving and maintaining a type environment for completed expressions during generation (similar to classic type systems), we need to accurately assess and handle partial expressions. Specifically, for each currently generated partial expression, we must decide whether the partial expression can be completed to match a required type. Determining this would allow us to constrain the LLM to provably generate well-typed expressions upon termination, but involves solving the challenging problem of type inhabitation [30, 67] in the novel context of LLM-based code generation." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.441, + 0.911, + 0.608 + ], + "angle": 0, + "content": "This Work: Type-Constrained Decoding. In this work, we introduce type-constrained decoding1, addressing the challenge of generating well-typed code using LLMs. We develop a sound algorithm to determine if a partial program can be completed into a well-typed program. This algorithm is based on a novel non-deterministic automaton we construct. The automaton incrementally builds abstract syntax trees described by the partial program and annotates them with type-relevant context, e.g., declared identifiers and expression types. 
It leverages such information to maintain a prefix property, ensuring that parsing a program prefix only results in a non-empty set of states when it can be completed into a well-typed program. To guarantee the prefix property, we design a sound type search algorithm that determines whether a partial expression can inhabit a given type. We construct our automaton for a generic, simply-typed Turing-complete calculus [10]." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.608, + 0.911, + 0.807 + ], + "angle": 0, + "content": "To demonstrate its practical effectiveness, we instantiate our approach on a non-trivial subset of TypeScript. We choose TypeScript for three key reasons: (i) it is currently one of the most actively used languages, e.g., in open-source projects on GitHub [26, 38]; (ii) as we show, state-of-the-art LLMs fail to reliably generate well-typed TypeScript code; (iii) its core type system is simple enough [10] to be suitable for developing the first prototype of our approach. We perform a comprehensive evaluation on TypeScript versions of the widely-used HumanEval and MBPP benchmarks [5, 13, 14], focusing on three common coding tasks: synthesis, translation, and repair. Our experimental results show that type-constrained decoding significantly enhances code generation for LLMs of various sizes (2B-34B parameters). For synthesis and translation, it reduces compilation errors by more than half and increases functional correctness relatively by \\(3.5\\%\\) to \\(5.5\\%\\). Additionally, it enhances functionally correct repair of non-compiling code relatively by \\(37\\%\\) on average. We further investigate our approach in depth through a runtime analyses and case studies." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.807, + 0.911, + 0.874 + ], + "angle": 0, + "content": "We highlight that our type constraining approach is broadly applicable to any language derivable from the core calculus, any code generation task in these languages, and any LLM utilizing next-token generation. In §6, we envision how our approach can benefit other production-ready languages and closed-weight LLMs." + }, + { + "type": "page_footnote", + "bbox": [ + 0.089, + 0.899, + 0.845, + 0.914 + ], + "angle": 0, + "content": "1Our code implementation is publicly available at https://github.com/eth-sri/type-constrained-code-generation." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.87, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:3" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.119, + 0.762, + 0.135 + ], + "angle": 0, + "content": "Main Contributions. Our main contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.139, + 0.907, + 0.171 + ], + "angle": 0, + "content": "- A prefix automaton and a type search algorithm to enable type constraining for LLM-based code generation, demonstrated on a generic, simply-typed core calculus (§3)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.172, + 0.875, + 0.188 + ], + "angle": 0, + "content": "- An instantiation and extension of our approach to the popular TypeScript language (§4)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.189, + 0.907, + 0.222 + ], + "angle": 0, + "content": "- An extensive evaluation across various LLMs and coding tasks, showing the significant benefit of our approach in reducing compilation errors and increasing functional correctness (§5)." 
+ }, + { + "type": "list", + "bbox": [ + 0.109, + 0.139, + 0.907, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.234, + 0.379, + 0.25 + ], + "angle": 0, + "content": "2 Background and Overview" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.254, + 0.908, + 0.304 + ], + "angle": 0, + "content": "In this section, we first provide relevant background on LLM-based code generation and constrained decoding. Then, we motivate our type constraining approach using an illustrative example and present a high-level overview of its construction." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.317, + 0.816, + 0.334 + ], + "angle": 0, + "content": "2.1 Background on LLM-based Code Generation and Constrained Decoding" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.337, + 0.485, + 0.652 + ], + "angle": 0, + "content": "LLM-based Code Generation. LLMs generate code incrementally by sampling one token at a time in an iterative manner, as depicted in Algorithm 1 (without the blue highlights). A user prompt \\( x \\) specifies a code generation task for a trained LLM. At Line 1, the output program \\( s \\) is initialized to an empty string or a program prefix provided in \\( x \\), e.g., a function signature. At the beginning of each generation iteration (Line 3), the LLM takes as input a concatenation \\( x \\circ s \\) of the prompt \\( x \\) and the current partial program \\( s \\). It then predicts a probability distribution \\( v \\) over a fixed, finite set of tokens, the vocabulary, where each token may be a single Unicode character or a string of multiple characters. All common singleton characters are included in LLMs' vocabulary, ensuring that any standard program can be produced by concatenating tokens [60]. 
Next, based on distri" + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.352, + 0.909, + 0.402 + ], + "angle": 0, + "content": "Algorithm 1 Vanilla LLM-based code generation (without the blue highlights) vs. constrained decoding (with the blue highlights)" + }, + { + "type": "text", + "bbox": [ + 0.495, + 0.407, + 0.906, + 0.44 + ], + "angle": 0, + "content": "Input: LLM, prompt \\( x \\), completion engine \\( CE_L \\) for language \\( L \\)" + }, + { + "type": "text", + "bbox": [ + 0.496, + 0.441, + 0.797, + 0.457 + ], + "angle": 0, + "content": "Output: Program \\(s\\) such that \\(s \\in L\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.458, + 0.623, + 0.471 + ], + "angle": 0, + "content": "1: initialize s" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.474, + 0.66, + 0.488 + ], + "angle": 0, + "content": "2: while true do" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.49, + 0.705, + 0.506 + ], + "angle": 0, + "content": "3: \\(\\pmb{v} := \\mathrm{LLM}(x \\circ s)\\)" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.508, + 0.695, + 0.521 + ], + "angle": 0, + "content": "4: while true do" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.525, + 0.652, + 0.538 + ], + "angle": 0, + "content": "5: \\(t\\sim \\pmb{v}\\)" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.54, + 0.82, + 0.556 + ], + "angle": 0, + "content": "6: if \\( CE_L(s \\circ t) \\) then break" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.558, + 0.902, + 0.572 + ], + "angle": 0, + "content": "7: elif \\( t = EOS \\) and \\( s \\in L \\) then break" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.574, + 0.83, + 0.589 + ], + "angle": 0, + "content": "8: else \\(\\pmb{v}[t] := 0\\); normalize \\(\\pmb{v}\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.59, + 0.767, + 0.605 + ], + "angle": 0, + "content": "9: if \\( t = EOS \\) then break" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.608, + 0.643, + 0.621 + ], + "angle": 0, + "content": "10: \\(s 
:= s \\circ t\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.625, + 0.609, + 0.637 + ], + "angle": 0, + "content": "11: return s" + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.458, + 0.902, + 0.637 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.653, + 0.91, + 0.686 + ], + "angle": 0, + "content": "bution \\(\\pmb{v}\\), a token \\(t\\) is sampled (Line 5) and appended to the program \\(s\\) (Line 10). This process is repeated until we encounter the special token EOS which signifies the end of the sequence (Line 9)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.686, + 0.909, + 0.77 + ], + "angle": 0, + "content": "LLMs learn to predict adequate probability distributions from extensive training on natural and programming languages [12, 59, 73]. These distributions implicitly encode language rules, allowing LLMs to successfully solve code generation tasks [13, 28, 59]. However, LLMs may fail to infer complex rules [9, 21, 72], derive incomplete rules for less common languages [13, 51], and, due to the probabilistic nature of its generation procedure, not consistently follow formal language rules." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.777, + 0.911, + 0.895 + ], + "angle": 0, + "content": "Constrained Decoding. The aforementioned shortcoming of LLMs can be mitigated by employing constrained decoding, which analyzes the intermediate model outputs \\( s \\) during the generation process and enforces that only valid tokens are incorporated. Specifically, constrained decoding leverages a completion engine \\( CE_{L} \\), specific to a language \\( L \\). Computing \\( CE_{L}(s) \\) returns whether partial program \\( s \\) can be completed to a well-formed program in \\( L \\), meaning whether there exists a (possibly empty) string \\( s' \\) such that \\( s \\circ s' \\in L \\). 
Equivalently, \\( CE_{L}(s) \\) determines whether \\( s \\) belongs to the prefix language \\( L^{p} \\) of \\( L \\), i.e., whether \\( s \\in L^{p} \\). \\( L^{p} \\) is formally defined as follows:" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.899, + 0.831, + 0.917 + ], + "angle": 0, + "content": "DEFINITION 1. For a given language \\( L \\), its prefix language is \\( L^p \\coloneqq \\{s \\mid \\exists s': s \\circ s' \\in L\\} \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.085, + 0.131, + 0.096 + ], + "angle": 0, + "content": "171:4" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.908, + 0.098 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.116, + 0.905, + 0.222 + ], + "angle": 0, + "content": "
function is_int(text: string): boolean {<completion>VanillaSyntaxTypesDescription
const num = Number(text);(1) ;acceptrejectrejectSyntactically invalid
return !isNaN(num) &&(2) beracceptacceptrejectUndeclared identifier
parseInt(num <completion>(3) ()acceptacceptrejectDisallowed operator
(4), 10)(5).string()acceptacceptrejectInvalid argument type
acceptacceptacceptWell-formed option
" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.228, + 0.908, + 0.275 + ], + "angle": 0, + "content": "Figure 1. Left is a partial TypeScript program derived from instance #113 of the MBPP benchmark [5], awaiting completion. Right are five completion options: (1)-(4) are invalid and (5) is well-formed. Our type-constrained decoding is the only approach capable of correctly rejecting invalid completions and accepting the valid one." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.3, + 0.91, + 0.449 + ], + "angle": 0, + "content": "As illustrated in blue highlights of Algorithm 1, constrained decoding differs from vanilla LLM-based code generation by adding an additional sample-and-check loop at Line 4 around the token sampling process at Line 5. A sampled token \\( t \\) is considered further only if \\( s \\circ t \\) can be completed to a well-formed program (Line 6) or \\( t \\) is EOS and \\( s \\) is already well-formed in \\( L \\) (Line 7). Otherwise, the probability of \\( t \\) is set to zero at Line 8, and the sample-and-check loop repeats. Note that a token \\( t \\) satisfying either Line 6 or Line 7 always exists, because \\( s \\) is in \\( L^p \\) and LLMs' vocabulary contains all common characters. Therefore, the number of iterations of the loop at Line 4 is bounded by the fixed LLM vocabulary size. In practice, only few iterations are needed (\\$5.3) and do not require additional LLM inference, ensuring a reasonable runtime overhead compared to vanilla decoding." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.45, + 0.909, + 0.549 + ], + "angle": 0, + "content": "The token-level guarantees extend inductively to guarantee the final program's validity with respect to \\( L \\). At Line 1, we start with a valid prefix in \\( L^p \\), i.e., either an empty string or a valid prefix provided in the user prompt. The check at Line 6 ensures that all intermediate outputs \\( s \\) are prefixes in \\( L^p \\). 
Additionally, Line 7 and Line 9 ensure that the return statement in Line 11 is reached only if \\( s \\in L \\). As an additional benefit, by steering previously ill-formed generations into well-formed ones, constrained decoding also increases the likelihood of generating functionally correct code." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.55, + 0.91, + 0.616 + ], + "angle": 0, + "content": "Note that commonly used grammar and type checkers can not be used as a completion engine for constrained decoding. They judge whether a program string \\( s \\) is well-formed according to the language \\( L \\), i.e., whether \\( s \\in L \\). When \\( s \\) is not a complete program in \\( L \\), but a valid prefix in \\( L^p \\), they return a different output than \\( CE_L(s) \\), which is not suitable for use in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.629, + 0.573, + 0.646 + ], + "angle": 0, + "content": "2.2 Overview of Our Type Constraining Approach" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.65, + 0.91, + 0.765 + ], + "angle": 0, + "content": "Inadequacy of Syntax-Only Constraining. To apply the constrained decoding algorithm described in §2, one needs to choose a language \\( L \\) and implement the completion engine \\( CE_{L} \\). Recent work has explored defining \\( L \\) as the set of syntactically valid programs, thus leveraging the syntactic rules of programming languages for constrained decoding [8, 66, 71]. However, the benefits of this approach are limited, because syntax accounts for only a small portion of overall program correctness. For instance, across our evaluations (§5), only \\( 3.5\\% \\) of the functional errors and \\( 6\\% \\) of the compilation errors in LLM-generated code are due to syntactic errors." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.766, + 0.911, + 0.916 + ], + "angle": 0, + "content": "We illustrate this limitation using the example in Figure 1. 
It presents five completion candidates for a partial program: (1)-(4) will lead to compilation errors and only (5) can result in a well-formed program. Based on syntax, completions that contain line terminations or invalid characters (e.g., $) could be rejected (1). However, many other cases, including (2)-(4), do not break syntactic rules but still cause compilation errors. For instance, candidate (2) results in accessing an undeclared identifier. In candidate (3), the function call operator will fail at execution time, as num is a number and can not be called. Candidate (4) passes a value of unexpected format to parseInt, which expects the first argument to be a string. In this example, (4) is generated by CodeLlama 34B [59]. Syntax-only constraining accepts this invalid completion, leading to a non-compilable final output." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.87, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:5" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.119, + 0.91, + 0.268 + ], + "angle": 0, + "content": "Our Approach: Leveraging the Type System. We require stronger constraints to effectively guide the model generation. Beyond syntax, type systems are commonly utilized in compilers, enforcing semantic rules to detect and reject bugs at compile time [23]. For Figure 1, the TypeScript type system would correctly reject code containing erroneous completions (2)-(4). Therefore, in this work, we propose leveraging type systems in constrained decoding to guide code generation. Our method accurately detects that only candidate (5) is a valid completion, guiding CodeLlama 34B to adopt this option and complete the program correctly. 
As detailed in §5, our experimental results demonstrate that our approach more than halves compiler errors in generated code and consistently increases the proportion of functionally correct programs." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.269, + 0.911, + 0.401 + ], + "angle": 0, + "content": "Incorporating typing rules into code generation offers substantial potential but presents a significant challenge. Previous research has focused primarily on constrained decoding for context-free languages, for which prefixes can be efficiently determined [8, 66, 71]. Type systems, however, require language specifications that exceed the capabilities of context-free grammars [43], inhibiting the direct application of prior techniques to type-constrained decoding. Moreover, determining whether a partially generated expression can be completed to be a well-typed full expression involves not only type checking and inference, as done in traditional compilers, but also addressing type inhabitation [39, 67]." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.401, + 0.91, + 0.552 + ], + "angle": 0, + "content": "To address these challenges, we design and implement a practical approach to determine whether a string can be completed to a well-typed program. We begin by developing a specialized kind of non-deterministic automaton that maintains a prefix property, formally defined in §3.2. This property ensures that every reachable state can lead to an accepting state. We leverage this property to build a completion engine for constrained decoding as in Algorithm 1. We construct such a completion engine to enforce well-typedness for a simply-typed language \\( L_{B} \\) in §3.3-§3.5 and extend it to a core subset of TypeScript in §4. At a high level, the automaton acts as a syntactic parser, additionally maintaining information about initialized variables, enclosing function declarations, and other type-related aspects of the partially parsed syntax tree. 
This is possible through dynamically created" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.552, + 0.551, + 0.566 + ], + "angle": 0, + "content": "annotated states that track the additional information." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.568, + 0.631, + 0.75 + ], + "angle": 0, + "content": "In Figure 2, we provide a concrete example for our prefix automata. Every state represents the currently parsed syntactic component and additionally tracks the surrounding typing information. For example, after parsing the partial program in Figure 1, the automaton currently parses an expression as the first argument to function parseInt. Transitions are annotated with further code completions that are deemed admissible based on the syntax and typing information. In the first state, the automaton has parsed num, inferring from previous declarations that it represents an identifier of type number. Based on the signature of the parseInt function call, the required type of the completed" + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.559, + 0.91, + 0.712 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.641, + 0.714, + 0.91, + 0.744 + ], + "angle": 0, + "content": "Figure 2. An example of a prefix automaton." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.75, + 0.912, + 0.918 + ], + "angle": 0, + "content": "argument is string. The automaton now determines the admissible transitions from the identifier state. State transitions corresponding to completions (1)-(4) from Figure 1 are disallowed, as they are determined to violate type rules based on the tracked type information. Further, the automaton needs to determine which operations on the current expression num of type number can be applied to obtain an expression of type string. To achieve this, we develop a type reachability search algorithm, which finds string-typed expressions num.toString() and num.isFinite(). toString(). 
Therefore, it returns that accesses to members .ToString and .isFinite are admissible, resulting in the two depicted transitions with the corresponding labels. In our experiment, CodeLlama 34B chooses to transition along .ToString(), the more likely completion based on its training data. Note that in our actual automaton formalism, as described at the end of §3.2, state transitions are" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.085, + 0.13, + 0.095 + ], + "angle": 0, + "content": "171:6" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.119, + 0.908, + 0.152 + ], + "angle": 0, + "content": "on a character level. Figure 2 condenses character-level transitions into string-level transitions for presentation purposes." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.152, + 0.633, + 0.351 + ], + "angle": 0, + "content": "The type reachability algorithm seeks to identify sequences of operators applied to a given expression such that the resulting expression possesses a required type. Conceptually, it performs a search over an abstracted type graph, whose nodes are types, and edges represent well-typed operations connecting the input and output types. An example of such a (partial) graph is shown in Figure 3, with a valid path highlighted in green color. Starting from the derived number type of num, the search first traverses a member access edge to reach the nullary function type () => string. Then, it traverses an edge representing a function call to reach the goal type string, concluding that the combination of traversed operators . toString() is a well-formed" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.352, + 0.907, + 0.4 + ], + "angle": 0, + "content": "completion for Figure 1. 
The path for num.isFinite().ToString() is analogously valid but omitted in Figure 3 for brevity. This type reachability search is invoked every time a partial expression is parsed, in order to determine valid transitions in the prefix automaton." + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.154, + 0.911, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.641, + 0.313, + 0.908, + 0.344 + ], + "angle": 0, + "content": "Figure 3. An example of a partial type search graph." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.401, + 0.909, + 0.468 + ], + "angle": 0, + "content": "We implement our approach for a significant subset of TypeScript (§4) and experimentally evaluate it for various LLMs and three important code generation tasks: synthesis, translation, and repair (§5). The results demonstrate that our approach provides significant benefits in both reducing compilation errors for LLM-generated code and increasing their functional correctness." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.48, + 0.437, + 0.496 + ], + "angle": 0, + "content": "3 Our Type Constraining Approach" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.501, + 0.909, + 0.584 + ], + "angle": 0, + "content": "In this section, we first present a generic, simply-typed language \\( L_{B} \\) (§3.1). Then, we present our type constraining approach using \\( L_{B} \\). Specifically, we introduce our prefix automaton formalism (§3.2) and define increasingly complex automata for parsing well-typed fragments of \\( L_{B} \\), beginning with identifiers, literals, and types (§3.3), continuing to expressions, including type search for type-restricted expressions (§3.4), and concluding with statements (§3.5)." 
+ }, + { + "type": "title", + "bbox": [ + 0.088, + 0.597, + 0.385, + 0.614 + ], + "angle": 0, + "content": "3.1 A SimplyTyped Language" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.617, + 0.91, + 0.667 + ], + "angle": 0, + "content": "We define a simply typed, Turing-complete language, \\( L_{B} \\). Its grammar and type system are generic, resembling the principles found in popular statically typed languages, such as TypeScript, Java, and Go. However, there may be a slight bias towards TypeScript, as our implementation is based on it." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.675, + 0.911, + 0.809 + ], + "angle": 0, + "content": "Syntax. The syntax of \\( L_{B} \\) is shown in Figure 4. The language includes expressions, type-annotated variable and function definitions, and control flows. Overall, it is based on a core subset of TypeScript [10] but can be adapted for other statically typed languages. Similar to Bierman et al. [10], we represent Kleene-Star repetitions using an overline, e.g., \\( \\overline{s} \\) represents a sequence of statements \\( s \\), and adhere to the TypeScript documentation to annotate parameter types in function signatures with argument names [17]. We make a distinction between base and extension expressions. The latter applies operators to previous expressions, leading to more complex expressions. This differentiation is useful later in §3.4 for constructing the prefix automaton for parsing expressions." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.816, + 0.912, + 0.918 + ], + "angle": 0, + "content": "Expression Typing Rules. The typing rules for \\( L_B \\)'s expressions are detailed in Figure 5. These rules form a subset of safeFTS, a type-safe portion of TypeScript described by Bierman et al. [10], allowing us to leverage their soundness results. 
The type rules for \\( L_B \\) use the standard concept of a type environment, denoted as \\( \\Gamma \\), which is a collection of pairs \\( (x : T) \\) of identifiers \\( x \\) and types \\( T \\). We write \\( \\Gamma \\vdash e : T \\) if the expression \\( e \\) has type \\( T \\) in the type environment \\( \\Gamma \\). An expression \\( e \\) is considered valid if its type can be derived by applying the given typing rules." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.495, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.87, + 0.084, + 0.908, + 0.095 + ], + "angle": 0, + "content": "171:7" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.116, + 0.877, + 0.376 + ], + "angle": 0, + "content": "
l ::=Literalp ::= x : TTyped Identifier
\\d+Numeric Literal
"\\"w*”String LiteralT ::=Type
true | falseBoolean LiteralnumberNumeric Type
stringString Type
x ::= \\w+IdentifierbooleanBoolean Type
(¯p) => TFunction Type
e ::= e0 | e1Expression
e0 ::=Base Expressions ::=Statement
lLiterallet x : T;Variable Declaration
xIdentifiere;Expression Statement
(¯p) => eFunction Expressionreturn e;Return Statement
(e)Grouped Expression{¯s}Statement Block
e1 ::=Extension Expressionfunction x (¯p) : T {¯s}Function Definition
e ⊙ eBinary Operatorif (e) s else sIf-Then-Else Statement
e(¯e)Function Call
e.nMember AccessM ::=¯sProgram
" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.385, + 0.908, + 0.417 + ], + "angle": 0, + "content": "Figure 4. The syntax of \\( L_{B} \\). Expressions are categorized into base and extension expressions. The later extends a given expression with suffix operators to form more complicated expressions." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.446, + 0.907, + 0.512 + ], + "angle": 0, + "content": "_literals are evaluated to their respective types (LIT - {NUM, STR, BOOL}). Identifiers \\( x \\) are evaluated based on the corresponding type in the type environment (IDENT). Anonymous functions are typed according to their annotated parameter types, with the return type determined by the returned expression (ANON). Grouping preserves the type of the inner expression (GROUP)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.513, + 0.91, + 0.597 + ], + "angle": 0, + "content": "Binary operators have predefined signatures \\( S_{1} \\odot S_{2}: T \\), such as number + number : number for addition and \\( T = T: T \\) for assignments. These signatures must be satisfied in well-typed expressions (op). Function calls require parameters to match the function signature (CALL). The type of member accesses \\( e.n \\) is determined using an auxiliary function LOOKUP(S, n), which fetches the type of member \\( n \\) for type \\( S \\). An instantiation of LOOKUP for TypeScript is provided by Bierman et al. [10]." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.606, + 0.909, + 0.671 + ], + "angle": 0, + "content": "Statements and Type Environments. The typing rules for statements are presented in Figure 6. Type environments are modified by statements, in particular variable declarations and function definitions. We use the notation \\(\\Gamma_1 \\vdash s \\rightarrow \\Gamma_2\\) to indicate that after executing statement \\(s\\) in type environment \\(\\Gamma_1\\), the new environment is \\(\\Gamma_2\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.672, + 0.91, + 0.806 + ], + "angle": 0, + "content": "Variable declarations introduce the identifier with declared type into the type environment, provided the identifier is not already defined (DECL). The type environment defines the context to evaluate expressions (EXPR) and return statements (RET). Return statements are only well-typed inside function bodies. The statements inside statement blocks and if-then-else statements must maintain valid type environments, but do not have an external effect (BLOCK, ITE). This also applies to function definitions; however, the defined function is finally added to the external type environment (FUN). Lastly, empty statements do not alter the type environment (NOP), while statement sequences propagate the type environment along the execution (SEQ)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.815, + 0.908, + 0.881 + ], + "angle": 0, + "content": "Return Types. The rules for checking return types are presented in Figure 7. Firstly, return statements must contain expressions matching the function's declared return type. Secondly, such an expression must be returned on every execution path. We use the notation \\(\\Gamma \\vdash \\overline{s} : R\\) to indicate the sequence of statements \\(\\overline{s}\\) ensures a return value of type \\(R\\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.882, + 0.908, + 0.916 + ], + "angle": 0, + "content": "For variable declarations and expression statements, the return type of the subsequent statements is considered (R-DECL, R-EXPR). 
The return type of a return statement directly corresponds to the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.085, + 0.131, + 0.096 + ], + "angle": 0, + "content": "171:8" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.084, + 0.908, + 0.098 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.12, + 0.919, + 0.147 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {L I T - N U M} \\right] \\frac {}{\\Gamma \\vdash \\backslash d + : n u m b e r} \\quad \\left[ \\mathrm {L I T - S T R} \\right] \\frac {}{\\Gamma \\vdash \" \\backslash w * \" : s t r i n g} \\quad \\left[ \\mathrm {L I T - B O O L} \\right] \\frac {}{\\Gamma \\vdash \\text {t r u e , f a l s e : b o o l e a n}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.107, + 0.155, + 0.905, + 0.194 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {I D E N T} \\right] \\frac {(x : T) \\in \\Gamma}{\\Gamma \\vdash x : T} \\quad \\left[ \\mathrm {A N O N} \\right] \\frac {\\Gamma \\cup \\overline {{p}} \\vdash e : T}{\\Gamma \\vdash (\\overline {{p}}) \\Rightarrow e : (\\overline {{p}}) \\Rightarrow T} \\quad \\left[ \\mathrm {C A L L} \\right] \\frac {\\Gamma \\vdash f : (\\overline {{x}} : \\overline {{S}}) \\Rightarrow T \\quad \\Gamma \\vdash \\overline {{e}} : \\overline {{S}}}{\\Gamma \\vdash f (\\overline {{e}}) : T}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.092, + 0.202, + 0.918, + 0.237 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {G R O U P} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash (e) : T} \\quad \\left[ \\mathrm {O P} \\right] \\frac {\\Gamma \\vdash e _ {1} : S _ {1} \\quad \\Gamma \\vdash e _ {2} : S _ {2} \\quad S _ {1} \\odot S _ {2} : T}{\\Gamma \\vdash e _ {1} \\odot e _ {2} : T} \\quad \\left[ \\mathrm {M E M} \\right] \\frac {\\Gamma \\vdash e : S \\quad \\text {L O O K U P} (S , n) = T}{\\Gamma \\vdash 
e . n : T}\n\\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.331, + 0.248, + 0.665, + 0.264 + ], + "angle": 0, + "content": "Figure 5. Typing rules for \\( L_B \\)'s expressions." + }, + { + "type": "equation", + "bbox": [ + 0.091, + 0.316, + 0.916, + 0.351 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {D E C L} \\right] \\frac {x \\notin \\Gamma}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\rightarrow \\Gamma \\cup (x : T)} \\quad \\left[ \\mathrm {E X P R} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash e ; \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {R E T} \\right] \\frac {\\text {i n s i d e f u n c t i o n b o d y} \\quad \\Gamma \\vdash e : T}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\rightarrow \\Gamma}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.359, + 0.854, + 0.397 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {B L O C K} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\leftrightarrow \\Gamma_ {B}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\nrightarrow \\Gamma} \\quad \\left[ \\mathrm {F U N} \\right] \\frac {x \\notin \\Gamma \\qquad \\Gamma \\cup (x : (\\overline {{p}}) = > T) \\cup (\\overline {{p}}) \\vdash \\overline {{s _ {x}}} \\nrightarrow \\Gamma_ {x}}{\\Gamma_ {1} \\vdash \\text {f u n c t i o n} x (\\overline {{p}}) : T \\{\\overline {{s _ {x}}} \\} \\nrightarrow \\Gamma \\cup (x : (\\overline {{p}}) = > T)}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.404, + 0.907, + 0.443 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {I T E} \\right] \\frac {\\Gamma \\vdash s _ {i f} \\rightarrow \\Gamma_ {i f} \\quad \\Gamma \\vdash s _ {e l s e} \\leftrightarrow \\Gamma_ {e l s e}}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\leftrightarrow \\Gamma} \\quad \\left[ \\mathrm {N O P} \\right] \\frac {}{\\Gamma \\vdash \\bullet \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {S E Q} \\right] \\frac {\\Gamma_ {1} \\vdash 
\\bar {s} \\leftrightarrow \\Gamma_ {2} \\quad \\Gamma_ {2} \\vdash s \\leftrightarrow \\Gamma_ {3}}{\\Gamma_ {1} \\vdash \\bar {s} s \\leftrightarrow \\Gamma_ {3}}\n\\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.197, + 0.454, + 0.799, + 0.47 + ], + "angle": 0, + "content": "Figure 6. Type environment extension rules for sequences of statements in \\( L_{B} \\)." + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.521, + 0.841, + 0.554 + ], + "angle": 0, + "content": "\\[\n\\left[ R - D E C L \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\bar {s} : R} \\quad \\left[ R - E X P R \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash e ; \\bar {s} : R} \\quad \\left[ R - R E T \\right] \\frac {\\Gamma \\vdash e : R}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\bar {s} : R}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.56, + 0.791, + 0.597 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {R - B L O C K - S E L F} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} : R \\quad \\Gamma \\vdash \\overline {{s}}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R} \\quad \\left[ \\mathrm {R - B L O C K - N E X T} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\quad \\Gamma \\vdash \\overline {{s}} : R}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.603, + 0.78, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\left[ \\mathrm {R - F U N} \\right] \\frac {\\Gamma \\cup (x : (\\bar {p} \\Rightarrow R)) \\vdash \\bar {s} : R ^ {\\prime} \\quad \\Gamma \\cup (x : (\\bar {p}) \\Rightarrow R) \\cup (\\bar {p}) \\vdash \\bar {s _ {x}} : R}{\\Gamma \\vdash \\text {f u n c t i o n} x (\\bar {p}): R \\{\\bar {s _ {x}} \\} \\bar {s} : R ^ {\\prime}}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.649, + 0.86, + 0.689 + ], + "angle": 0, + "content": "\\[\n\\left[ 
\\mathrm {R - I T E - S E L F} \\right] \\frac {\\Gamma \\vdash s _ {i f} : R \\quad \\Gamma \\vdash s _ {e l s e} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R} \\quad \\left[ \\mathrm {R - I T E - N E X T} \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R}\n\\]" + }, + { + "type": "image_caption", + "bbox": [ + 0.315, + 0.701, + 0.682, + 0.716 + ], + "angle": 0, + "content": "Figure 7. \\(L_{B}\\)'s typing rules for function returns." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.765, + 0.91, + 0.866 + ], + "angle": 0, + "content": "type of the returned expression (R-RET). For statement blocks, the return type is decided by either the block itself or the subsequent statements (R-BLOCK-SELF, R-BLOCK-NEXT). In function definitions, the return type is determined by the type of the subsequent statements, similar to expression statements. It is additionally required that the function body returns a type matching the declared return type (R-FUN). For if-then-else statements, both branches must return the same type (R-ITE-SELF), or the return type is determined by the following statements (R-ITE-NEXT)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.88, + 0.908, + 0.914 + ], + "angle": 0, + "content": "Language Definition. In summary, a program \\( s \\) is in language \\( L_{B} \\) if both (i) \\( s \\) conform to the grammar in Figure 4 and (ii) \\( s \\) is well-typed according to the typing rules in Figures 5-7." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.87, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:9" + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.119, + 0.411, + 0.135 + ], + "angle": 0, + "content": "3.2 Prefix Automaton Definition" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.139, + 0.91, + 0.223 + ], + "angle": 0, + "content": "We introduce a general definition of prefix automata, beginning with basic automata concepts. Prefix automata are standard automata that ensure a special prefix property2. This property enables us to use a prefix automaton to decide whether some string is in the prefix language \\( L^p \\) of a given language \\( L \\). That is, the prefix automaton can function as a completion engine \\( CE_L \\) to facilitate constrained decoding, as described in §2." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.222, + 0.911, + 0.456 + ], + "angle": 0, + "content": "We consider an automaton \\( A \\coloneqq \\langle \\Sigma, Q, \\delta, I, F \\rangle \\), a tuple of the five following elements: (i) \\( \\Sigma \\) is an alphabet of input symbols; (ii) \\( Q \\) is a set of states; (iii) \\( \\delta : Q \\times \\Sigma \\mapsto \\mathcal{P}(Q) \\) is a computable transition function that maps a state and an input symbol to a finite set of next states; (iv) \\( I \\subseteq Q \\) is a finite set of initial states; and (v) \\( F \\subseteq Q \\) is a decidable set of accepting states. As a convention, we denote a symbol in \\( \\Sigma \\) as \\( c \\), a string of symbols in \\( \\Sigma^* \\) as \\( s \\), the empty string as \\( \\varepsilon \\) and an operator for concatenating symbols and strings as \\( \\circ \\). The transition function \\( \\delta \\) maps a given state to all possible subsequent states. 
When \\( \\delta \\) is applied on a set of states \\( \\mathbf{q} \\subseteq Q \\), we take the union of the results as output, i.e., \\( \\delta(\\mathbf{q}, c) \\coloneqq \\bigcup_{q \\in \\mathbf{q}} \\delta(q, c) \\). The transition function defines a directed graph \\( G \\) over \\( Q \\), where every state is a node and there is an edge annotated with \\( c \\) from \\( q \\) to \\( q' \\) if \\( q' \\in \\delta(q, c) \\). The language parsed by \\( A \\) comprises all strings \\( s \\) such that traversing \\( G \\) from some initial state in \\( I \\) along the edges annotated with \\( c_i \\) for \\( c_1 \\circ c_2 \\circ \\ldots \\circ c_n = s \\), it is possible to reach some accepting state in \\( F \\). Formally, we define recursively a traversal function \\( \\gamma \\) for states \\( \\mathbf{q} \\) as \\( \\gamma(\\mathbf{q}, \\varepsilon) \\coloneqq \\mathbf{q} \\) and \\( \\gamma(\\mathbf{q}, s \\circ c) \\coloneqq \\delta(\\gamma(\\mathbf{q}, s), c) \\). The language accepted by \\( A \\) is then defined as \\( L(A) \\coloneqq \\{s \\mid \\gamma(I, s) \\cap F \\neq \\emptyset\\} \\). The traversal function has two intuitive properties concerning reachability that can be shown inductively:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.458, + 0.79, + 0.475 + ], + "angle": 0, + "content": "(P1) A path along the graph can be split arbitrarily, i.e., \\(\\gamma (\\mathbf{q},s\\circ s^{\\prime}) = \\gamma (\\gamma (\\mathbf{q},s),s^{\\prime})\\)" + }, + { + "type": "text", + "bbox": [ + 0.096, + 0.475, + 0.909, + 0.492 + ], + "angle": 0, + "content": "(P2) If a state is reached by \\( s \\circ s' \\), some state is reachable by \\( s \\), i.e., \\( \\gamma(\\mathbf{q}, s \\circ s') \\neq \\emptyset \\Rightarrow \\gamma(\\mathbf{q}, s) \\neq \\emptyset \\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.096, + 0.458, + 0.909, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.494, + 0.91, + 0.529 + ], + "angle": 0, + "content": "An automaton satisfies the prefix property or is a prefix automaton, if there is a path from every reachable state to some accepting state, or formally:" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.535, + 0.91, + 0.571 + ], + "angle": 0, + "content": "DEFINITION 2. For an automaton \\( A \\), the prefix property holds iff \\( \\forall q \\in \\gamma(I, s) : \\exists s' : \\gamma(q, s') \\cap F \\neq \\emptyset \\). The automaton is a prefix automaton if it satisfies the prefix property." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.577, + 0.91, + 0.644 + ], + "angle": 0, + "content": "Intuitively, for such automata, reaching some state by consuming string \\( s \\) implies that \\( s \\) is a prefix to some member of \\( L(A) \\). We define the reachable language of \\( A \\), all inputs that result in some state, as \\( L_r(A) := \\{s \\mid \\gamma(I, s) \\neq \\emptyset\\} \\). Below, we establish the equivalence of \\( L_r(A) \\) and \\( L(A)^p \\), the prefix language of \\( L(A) \\) as defined in Definition 1." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.652, + 0.592, + 0.67 + ], + "angle": 0, + "content": "LEMMA 1. If \\( A \\) is a prefix automaton, then \\( L(A)^p = L_r(A) \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.677, + 0.912, + 0.761 + ], + "angle": 0, + "content": "Proof. For any \\( s \\in L(A)^p \\) there exists \\( s' \\) such that \\( s \\circ s' \\in L(A) \\), by the definition of prefix languages. By the definition of \\( L(A) \\), this implies \\( \\gamma(I, s \\circ s') \\neq \\emptyset \\). Then, using (P2), we further derive \\( \\gamma(I, s) \\neq \\emptyset \\), i.e., \\( s \\in L_r(A) \\). Therefore, \\( L(A)^p \\subseteq L_r(A) \\) holds. The other direction also holds. 
We first see that \\( s \\in L_r(A) \\implies \\gamma(I, s) \\neq \\emptyset \\). Then applying Definition 2 and (P1), we find \\( \\exists s': \\gamma(I, s \\circ s') \\cap F \\neq \\emptyset \\), implying \\( s \\circ s' \\in L(A) \\) and thus \\( s \\in L(A)^p \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.768, + 0.91, + 0.803 + ], + "angle": 0, + "content": "Note that \\( L(A)^P \\subseteq L_r(A) \\) holds generally for automata, since the first half of the proof does not require the prefix property." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.809, + 0.912, + 0.895 + ], + "angle": 0, + "content": "From Prefix Automata to Completion Engines. With Lemma 1, given a prefix automaton \\( A \\), we can define a convenient-to-compute completion engine for the underlying language \\( L(A) \\): \\( CE_{L(A)}(s) \\coloneqq \\gamma(I, s) \\neq \\emptyset \\). Since our target language is \\( L \\) and not \\( L(A) \\), we now need to determine the relationship between \\( L(A) \\) and \\( L \\). If we construct \\( A \\) such that it parses a subset of \\( L \\), i.e., \\( L(A) \\subseteq L \\), we are guaranteed that all LLM generations constrained by \\( CE_{L(A)} \\) lie in \\( L \\). 
Conversely, if \\( L(A) \\supseteq L \\)," + }, + { + "type": "page_footnote", + "bbox": [ + 0.089, + 0.9, + 0.816, + 0.917 + ], + "angle": 0, + "content": "Note that the prefix property defined in our work differs from the one discussed in classical texts, e.g., [31]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:10" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.118, + 0.91, + 0.236 + ], + "angle": 0, + "content": "we are guaranteed that every string in \\( L \\) can be expressed under constrained decoding, but not that every generation is valid. For example, if \\( A \\) permits all syntactically correct programs, it guarantees that all well-typed programs can be generated, but permits ill-typed programs as well. Therefore, \\( L(A) \\subseteq L \\) is required to achieve our goal of enforcing well-typedness on LLM-generated code. Ideally, \\( A \\) would parse \\( L \\) exactly, i.e., \\( L(A) = L \\), which in our setting additionally guarantees that every well-typed program can be expressed under the constraints of the completion engine. If this is not achieved, it is important for \\( A \\) to capture a large subset of \\( L \\) to be practically useful." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.245, + 0.909, + 0.361 + ], + "angle": 0, + "content": "Building a Prefix Automaton for \\( L_B \\): Warming up. In the next sections, we will construct a prefix automaton for soundly parsing well-typed programs in \\( L_B \\), by presenting various prefix automata for well-typed fragments of \\( L_B \\). Our final automaton will cover a significant but incomplete subset of \\( L_B \\). 
Incompleteness exists because to ensure that our algorithms terminate, we do not cover high-order types that are less likely to occur in practice. This is discussed in more detail in §3.4. Our evaluation in §5 empirically demonstrates that our approach sufficiently covers practical use cases to significantly improve the correctness of LLM-generated code." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.362, + 0.909, + 0.427 + ], + "angle": 0, + "content": "We choose \\(\\Sigma\\) to be the set of Unicode characters. This makes our completion engine agnostic to LLM vocabularies. Even though LLMs' vocabularies differ, their tokens are always a string of single or multiple characters. When our completion engine for \\(L_{B}\\) is called during constrained decoding, i.e., at Line 6 of Algorithm 1, it processes the sampled token character by character." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.428, + 0.907, + 0.478 + ], + "angle": 0, + "content": "Before proceeding, we briefly introduce several base prefix automata below, with their precise definitions detailed in Appendix A.1. These automata are later combined, with parts of the transition function being overwritten, to construct more complex automata that capture elements of \\( L_{B} \\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.483, + 0.905, + 0.514 + ], + "angle": 0, + "content": "- Union \\( A_X \\cup A_Y \\) parses the language \\( \\{s \\mid s \\in L(A_X) \\cup L(A_Y)\\} \\). It is a prefix automaton if both \\( A_X \\) and \\( A_Y \\) are prefix automata." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.516, + 0.905, + 0.547 + ], + "angle": 0, + "content": "- Concatenation \\( A_{XY} \\) parses the language \\( \\{s \\circ s' \\mid s \\in L(A_X), s' \\in L(A_Y)\\} \\). It is a prefix automaton if \\( A_X \\) and \\( A_Y \\) are both prefix automata, and \\( L(A_Y) \\neq \\emptyset \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.549, + 0.905, + 0.58 + ], + "angle": 0, + "content": "- Kleene-Star \\(A_{\\overline{X}}\\) parses the language \\(\\{\\overline{s} \\mid s \\in L(A_X)\\}\\). It is a prefix automaton if \\(A_X\\) is a prefix automaton." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.583, + 0.757, + 0.598 + ], + "angle": 0, + "content": "- Terminal \\( A_{\\mathsf{S}} \\) parses the language \\( \\{\\mathsf{S}\\} \\), where \\( \\mathsf{S} \\) is a fixed, non-empty string." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.6, + 0.755, + 0.615 + ], + "angle": 0, + "content": "- Empty \\( A_{\\emptyset} \\) parses the empty language \\( \\varnothing \\) and is always a prefix automaton." + }, + { + "type": "list", + "bbox": [ + 0.109, + 0.483, + 0.905, + 0.615 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.63, + 0.614, + 0.648 + ], + "angle": 0, + "content": "3.3 Prefix Automata for Identifiers, Literals, and Types" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.651, + 0.907, + 0.686 + ], + "angle": 0, + "content": "We now introduce prefix automata for basic syntactic elements of \\( L_{B} \\): identifiers, literals, and type annotations. The languages parsed by these automata exactly match their counterparts in \\( L_{B} \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.695, + 0.909, + 0.779 + ], + "angle": 0, + "content": "**Literals.** The prefix automaton for literals \\( A_{I} \\coloneqq A_{\\mathrm{NUM}} \\cup A_{\\mathrm{STR}} \\cup A_{\\mathrm{BOOL}} \\) accepts number, string, and boolean literals as defined in Figure 4. The automata \\( A_{\\mathrm{NUM}}, A_{\\mathrm{STR}} \\), and \\( A_{\\mathrm{BOOL}} \\) are defined by the deterministic finite automaton representation of the corresponding regular expression of the literal. 
To ensure the prefix property on the finite automata of the regular expression, we prune states from which accepting states can not be reached." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.788, + 0.909, + 0.84 + ], + "angle": 0, + "content": "Identifiers. During parsing, we maintain the current type environment \\(\\Gamma\\), as detailed in §3.5. We define the identifier automaton \\(A_{x}\\) as the union of the terminal automata for identifiers defined in \\(\\Gamma\\). In other words, \\(A_{x} := \\bigcup_{y \\in \\Gamma} A_{y}\\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.848, + 0.91, + 0.916 + ], + "angle": 0, + "content": "Types. The type automaton \\( A_{T} \\) accepts type annotations as defined in the grammar of \\( L_{B} \\) (Figure 4). It is defined as \\( A_{T} := A_{\\mathrm{TYPE - LIT}} \\cup A_{\\mathrm{TYPE - FUN}} \\). This includes type literal automaton \\( A_{\\mathrm{TYPE - LIT}} := A_{\\mathrm{string}} \\cup A_{\\mathrm{number}} \\cup A_{\\mathrm{boolean}} \\) and function type automaton \\( A_{\\mathrm{TYPE - FUN}} := A_{(\\overline{p})} \\Rightarrow T \\). The latter is a concatenation of multiple prefix automata, with the parameter and return types recursing on \\( A_{T} \\). This recursive" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.083, + 0.496, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.906, + 0.095 + ], + "angle": 0, + "content": "171:11" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.118, + 0.907, + 0.152 + ], + "angle": 0, + "content": "definition is valid, since it ensures a finite set of initial states, defines a decidable accepting set, and preserves the prefix property." 
+ }, + { + "type": "title", + "bbox": [ + 0.089, + 0.166, + 0.457, + 0.183 + ], + "angle": 0, + "content": "3.4 Prefix Automaton for Expressions" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.187, + 0.909, + 0.337 + ], + "angle": 0, + "content": "We introduce prefix automata to parse well-typed expressions in \\( L_{B} \\). We begin by describing an automaton \\( A_{e} \\) to parse expressions whose types are unrestricted, e.g., any expression \\( e \\) in an expression statement \\( e \\);. Then, we present an automaton \\( A_{e} \\downarrow T \\) for expressions whose type is constrained to \\( T \\), e.g., for parameters of function calls. The type-constrained version accepts a string only if the inhabited type of the represented expression matches \\( T \\). To preserve the prefix property, we need to ensure that partial expressions can be completed to inhabit the constrained type. Completions may involve arbitrarily many applications of operators, which may modify the expression type. We therefore introduce a type search algorithm that soundly determines which types an expression can inhabit, and use it to prune transitions that violate the prefix property." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.346, + 0.91, + 0.412 + ], + "angle": 0, + "content": "Unrestricted Expressions. To handle the recursive syntactic structure of expressions, we differentiate two kinds as shown in Figure 4: base expressions, including identifiers, literals, grouped expressions, and anonymous functions, and extension expressions, which are operator applications (binary operator, member access, or function call) that lead to extending a given expression." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.413, + 0.911, + 0.578 + ], + "angle": 0, + "content": "The expression automaton \\( A_{e} \\) is thus defined as the union of base expression automata \\( A_{x}, A_{l}, A_{(e)} \\), and \\( A_{(\\overline{p})} \\Rightarrow e \\), with potential extensions \\( A_{\\odot e}, A_{.n} \\), and \\( A_{(\\overline{e})} \\). The individual base and extension automata are constructed by concatenating the respective terminal automata and recursively \\( A_{e} \\). Additionally, we restrict the type of the recursive \\( A_{e} \\) if the restriction is required by the type system, e.g., for parsing call parameters with a fixed type. We provide additional details on this restriction in Appendix A.2. Since an expression can end after either base or extensions, accepting states of both base and extending automata are accepting states of \\( A_{e} \\). To implement extensions, we start from the base expression automata and recursively adjust \\( A_{e} \\)'s transition function \\( \\delta_{e} \\) by adding outgoing edges from the accepting states of the current automaton to the initial states of the extending automata, or formally:" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.586, + 0.833, + 0.631 + ], + "angle": 0, + "content": "\\[\n\\forall X, Y: \\delta_ {e} (q _ {Y} ^ {X}, c) := \\left\\{ \\begin{array}{l l} \\delta_ {Y} (q _ {Y} ^ {X}, c) \\cup \\delta_ {e} (I _ {(\\overline {{e}})} ^ {X}, c) \\cup \\delta_ {e} (I _ {\\odot e} ^ {X}, c) \\cup \\delta_ {e} (I _ {. 
n} ^ {X}, c) & \\text {i f q _ {Y} ^ {X} \\in F _ {Y}} \\\\ \\delta_ {Y} (q _ {Y} ^ {X}, c) & \\text {o t h e r w i s e ,} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.639, + 0.909, + 0.822 + ], + "angle": 0, + "content": "where the labels \\( X \\) and \\( Y \\) for a state \\( q_{Y}^{X} \\) represent that a string \\( X \\) has been parsed, and currently the active automaton is \\( A_{Y} \\), which can be one of the following: \\( A_{x}, A_{l}, A_{(e)}, A_{(\\overline{p})} \\Rightarrow e, A_{\\odot e}, A_{.n} \\), and \\( A_{(\\overline{e})} \\). The superscripts are useful for tracking the currently expressed type, enabling us to determine the validity of extensions and transition to type-restricted expressions based on \\( L_{B} \\)'s typing rules. For instance, for state \\( q^{42} \\), the addition operator extension \\( +e \\) and function call extension \\( (\\overline{e}) \\) are syntactically applicable to 42 of type number. While the addition operator with type signature number + number :number is allowed, we can not apply a function call on number. In general, we set \\( I_{Y}^{X} := \\emptyset \\) when \\( Y \\) is an invalid extension to \\( X \\). Moreover, for the extension \\( +e \\) to be valid, \\( e \\) must be of type number. To this end, we transition to a type-restricted expression automaton by setting \\( I_{+e}^{42} \\) to the set of initial states for \\( A_{+} \\circ (A_{e} \\downarrow \\text{number}) \\). Similar to the recursive type automaton, our definition of \\( A_{e} \\) ensures a finite set of initial states and a decidable accepting set." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.832, + 0.91, + 0.916 + ], + "angle": 0, + "content": "Type-Constrained Expressions. To implement \\( A_{e} \\downarrow T \\), we must determine whether a partial expression \\( s \\) can be completed to inhabit type \\( T \\). 
Completing \\( s \\) without any extension can lead to a possible set of types and repeated extensions can further alter the result type, but we are not guaranteed that the desired type can be reached. Moreover, extensions can be applied indefinitely, prohibiting an exhaustive search of possible completions." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:12" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.908, + 0.099 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.118, + 0.91, + 0.203 + ], + "angle": 0, + "content": "We therefore develop a two-tiered algorithm, which we describe in the following paragraphs. This algorithm first identifies the derivable types DERIVABLE \\((q_{s})\\) of \\(s\\) based on its current state \\(q_{s}\\). DERIVABLE \\((q_{s})\\) refers to the set of inhabitable types for all possible expressions completed from \\(s\\) without extension. Second, a type reachability search REACHABLE \\((\\text{DERIVABLE}(q_{e}), T)\\) is performed to determine if \\(T\\) can be inhabited by extending from the derivable types of \\(s\\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.202, + 0.911, + 0.353 + ], + "angle": 0, + "content": "We prune automaton transitions when this type search returns a negative result. To ensure the prefix property, the performed search is sound, i.e., it only returns a positive result if \\( T \\) can be expressed by a valid sequence of extensions. This also aligns with our goal of generating only well-typed programs, ensuring that our expression automata accept a subset of all well-typed expressions of \\( L_{B} \\). 
To ensure termination, the search is incomplete, i.e., there may be a valid sequence of transitions to express \\( T \\) which is not found by the search and we may end up disallowing generation of a well-typed expression. However, it only avoids traversing types of high complexity that are less likely to occur in practice. We further empirically ensure that our approach is practically effective (§5)." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.359, + 0.428, + 0.575 + ], + "angle": 0, + "content": "Derivable Types. For the first part of the algorithm, we determine all types inhabitable by the currently parsed expression \\(s\\) without extension, i.e., \\(\\mathrm{DERIVABLE}(q_s)\\). For example, while parsing partial identifier \\(x\\) in the type environment \\(\\Gamma := \\{(x : \\text{number}), (xy : \\text{string})\\}\\), we have \\(\\mathrm{DERIVABLE}(q_x) = \\{\\text{number}, \\text{string}\\}\\) and \\(\\mathrm{DERIVABLE}(q_{xy}) = \\{\\text{string}\\}\\). For a final state \\(q\\) of expression \\(e\\), we define \\(\\mathrm{DERIVABLE}(q) := T\\), where \\(\\Gamma \\vdash e : T\\). Different expressions impose different" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.575, + 0.91, + 0.643 + ], + "angle": 0, + "content": "rules on derivability, and we present the detailed rules in Table 1. Note that for grouped expressions and function literals, we need to enumerate reachable types by recursively contained expressions. To avoid explicitly enumerating all reachable types, we integrate the derivability and reachability algorithms. This optimization is discussed in more detail in Appendix A.4." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.65, + 0.91, + 0.686 + ], + "angle": 0, + "content": "LEMMA 2. For state \\( q \\in \\gamma(I_e, s) \\) of partial expression \\( s \\), DERIVABLE(\\( q \\)) returns all \\( T \\) s.t. 
exists some suffix \\( s' \\) with \\( \\Gamma \\vdash s \\circ s': T \\) and \\( s' \\) does not involve an extension (operator, call, or member access)." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.692, + 0.714, + 0.71 + ], + "angle": 0, + "content": "PROOF. By case distinction on the possible states of partial expressions." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.716, + 0.911, + 0.918 + ], + "angle": 0, + "content": "Type Reachability. To determine which types are inhabitable by extending a base expression \\( e \\) of a given type \\( T \\) (with binary operator, function call, or member access), we analyze sequences of single extension steps with compatible signatures. This process is conceptualized as a search over a graph where types are nodes and extension steps are edges. For every binary operator \\( \\odot \\) with the signature \\( T \\odot X : S \\), an edge is created from type \\( T \\) to type \\( S \\). As an example, the operator for numerical addition \\( + \\) has the signature number \\( + \\) number: number, thereby forming an edge from number to itself. Furthermore, for every member \\( n \\) of type \\( T \\), we create an edge from \\( T \\) to \\( \\text{LOOKUP}(T, n) \\), e.g., from number to \\( () => \\) string for the member to string of number type. Finally, we connect each function type \\( (\\overline{p}) => R \\) and with its return type \\( R \\). For instance, \\( () => \\) string is connected with string. Examples of type graphs can be found in §2.2 and Figure 3. Note that these extension steps are abstract, in the sense that they focus on the type of the expression being extended and the resulting type after extension, not considering textual representation and parameters." + }, + { + "type": "table_caption", + "bbox": [ + 0.438, + 0.363, + 0.911, + 0.426 + ], + "angle": 0, + "content": "Table 1. Definition of DERIVABLE(x) for partial expressions introduced in Figure 4. 
\\(s \\leq s'\\) expresses that \\(s\\) is a prefix of \\(s'\\). pmatch(s, T) determines whether a prefix \\(s\\) partially matches the regular expression of literals of type \\(T\\)." + }, + { + "type": "table", + "bbox": [ + 0.438, + 0.427, + 0.907, + 0.567 + ], + "angle": 0, + "content": "
sDERIVABLE(qs)
l{T | pmatch(l,T),T ∈ {number, string, boolean}}
x{T | x ≤ n, (n : T) ∈ Γ}
(¯p) => e{ (¯p) => T | REACHABLE(DERIVABLE(qe),T)}
(e{T | REACHABLE(DERIVABLE(qe),T)}
e ⊙{T | ∃S': Γ ↔ e : S ∧ S ⊕ S': T}
e({R | Γ ↔ e: (¯p) => R}
e.a{S | a ≤ n, Γ ↔ e : T, LOOKUP(T,n) = S}
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.099 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:13" + }, + { + "type": "table", + "bbox": [ + 0.089, + 0.116, + 0.911, + 0.297 + ], + "angle": 0, + "content": "
Algorithm 2 Our type reachability search algorithm
Input: Current type T of some expression e, goal type G
Output: Whether G can be reached by extending e
1: function REACHABLE(T, G)
2: if T = G then return true▷ The goal type is successfully found
3: if T is marked then return false else mark T▷ Type T is marked to avoid cycles
4: for each valid extension step ⌿ from T do
5: S := the resulting type of applying ⌿ on T
6: if PRUNESEARCH(T, G, S) continue▷ Prune the search to ensure termination
7: if REACHABLE(S, G) return true▷ Recurse to the next round of extension
8: return false▷ No suitable extension is found
" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.322, + 0.911, + 0.438 + ], + "angle": 0, + "content": "The type reachability algorithm, Algorithm 2, implements a depth-first search over this type graph, starting from the current type \\( T \\), succeeding upon finding goal type \\( G \\) (Line 2), marking any visited types to prevent cycles (Line 3). Then, it proceeds to iterate over all valid extension steps from \\( T \\) (Line 4) and computes the resulting type \\( S \\) after the extension step is applied (Line 5). In the conceptualized type graph, as described in the previous paragraph, this is equivalent to exploring all outgoing edges from \\( T \\). At Line 7, we proceed to recursively search if \\( S \\) can reach \\( G \\). If all recursive calls are unsuccessful, the goal type can not be reached (Line 8)." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.439, + 0.91, + 0.653 + ], + "angle": 0, + "content": "Some programming languages define self-referential default members, e.g., clone in Java or value0f in TypeScript, which are nullary functions that return a value of the same type as the callee, \\((\\mathbf{\\beta})\\Rightarrow T\\) for type \\(T\\). When these members are accessed in functions, higher-order functions can be derived indefinitely. For instance, for a function \\(f\\) with type \\((\\mathbf{\\beta})\\Rightarrow S\\), \\(f.\\) value0f has the type \\((\\mathbf{\\beta})\\Rightarrow (\\mathbf{\\beta})\\Rightarrow S\\). We therefore need to restrict the type search to a finite set of types to ensure termination. At Line 6 of Algorithm 2, we add a heuristic PRUNESEARCH into the search, which decides where to prune the search process. We develop a simple heuristic based on the results from Gvero et al. [30]. This heuristic prunes exploration of types with higher complexity than goal or source type if they do not contain yet unexplored primitive types, thus preventing exploration of arbitrarily complex types. 
The details of this heuristic are presented in Appendix A.3. While ensuring termination, our heuristic leads to incompleteness and the potential rejection of well-typed expressions. However, this effect is less pronounced in practical usage, as only highly complex (thus less realistically used) types are avoided." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.655, + 0.603, + 0.673 + ], + "angle": 0, + "content": "We proceed to prove the soundness of Algorithm 2 below." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.68, + 0.913, + 0.714 + ], + "angle": 0, + "content": "LEMMA 3. The type search in Algorithm 2 is sound, i.e., for any expression \\( e \\) with \\( \\Gamma \\vdash e : T \\), if REACHABLE(T,G) holds, then there exists a sequence of extensions \\( y \\) such that \\( \\Gamma \\vdash e \\circ y : G \\)." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.722, + 0.912, + 0.824 + ], + "angle": 0, + "content": "Proof. By the design of Algorithm 2, if REACHABLE\\((T,G)\\) returns true, there is a sequence of \\(n\\) recursive calls to REACHABLE\\((T_i,G)\\), with \\(T_0 = T\\) and REACHABLE\\((T_n,G) = \\text{true}\\). Each \\(T_i\\) (\\(i > 0\\)) is derived because some extension \\(\\diamond_i\\) is applicable to \\(T_{i-1}\\) based on the typing rules of \\(L_B\\). We then convert each \\(\\diamond_i\\) to its concrete, textual version \\(\\spadesuit_i\\). This representation includes the required well-typed parameters of \\(\\spadesuit_i\\) (i.e., for binary operators and non-nullary functions), which are constructed using literals. Finally, we construct \\(y\\) as \\(\\spadesuit_1 \\circ \\ldots \\circ \\spadesuit_n\\)." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.832, + 0.912, + 0.918 + ], + "angle": 0, + "content": "Note that using any pruning heuristic at Line 6 of Algorithm 2 preserves soundness, which in turn is sufficient to preserve the required prefix property, as defined in Definition 2. 
We can conclude that the two-tiered search algorithm soundly determines whether the desired target type can be derived from some partial input. Therefore, we conclude that \\( A_{e} \\downarrow T \\) and \\( A_{e} \\) are prefix automata that parse a subset of well-typed expressions in \\( L_{B} \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:14" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.118, + 0.909, + 0.152 + ], + "angle": 0, + "content": "COROLLARY 4. If REACHABLE( DERIVABLE(q), G) holds for any \\( q \\in \\gamma(I_e, s) \\) of a partial expression \\( s \\), then there exists a suffix \\( s' \\) such that \\( \\Gamma \\vdash s \\circ s': G \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.162, + 0.641, + 0.178 + ], + "angle": 0, + "content": "Proof. This conclusion follows directly from Lemmas 2 and 3." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.191, + 0.909, + 0.241 + ], + "angle": 0, + "content": "LEMMA 5. The language parsed by \\( A_e \\downarrow T \\) is thus a subset of the expressions of \\( L_B \\) of type \\( T \\), i.e., \\( L(A_e \\downarrow T) \\subseteq \\{s \\mid \\Gamma \\vdash s : T\\} \\). Since \\( A_e \\) recursively involves \\( A_e \\downarrow T \\), the language parsed by \\( A_e \\) is also a subset of well-typed expressions of \\( L_B \\), i.e., \\( L(A_e) \\subseteq \\{s \\mid \\exists T : \\Gamma \\vdash s : T\\} \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.092, + 0.255, + 0.435, + 0.271 + ], + "angle": 0, + "content": "3.5 Prefix Automata for Statements" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.276, + 0.909, + 0.409 + ], + "angle": 0, + "content": "We define the remaining automata to capture the complete language \\( L_{B} \\). The statement automaton is defined recursively as \\( A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}} \\). The declaration automaton \\( A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x: T} \\); captures undefined variable names \\( x \\) by accepting all strings, except for existing identifiers. This automaton is a prefix automaton since an accepting state can always be reached by appending characters to the declared identifier. The return statement automaton is \\( A_{\\emptyset} \\) when outside a function and restricts the parsed expression to the return type of the surrounding function otherwise. The remaining automata are mainly concatenations of previously defined automata and recursive invocations of \\( A_{s} \\), with small variations detailed in Appendix A.5." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.418, + 0.909, + 0.519 + ], + "angle": 0, + "content": "Tracking Type Environments. Generally, we follow the typing rules in Figure 6. Identifiers are passed on through all state transitions, matching the rule SEQ, where the type environment of consecutive statements needs to be compatible. However, in the cases of BLOCK, ITE and FUN, we discard the local type environment after parsing, matching the respective typing rules. In FUN additionally, the function signature and parameters are added into the type environment of the function body automaton, and the function signature in the environment of subsequent statements." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.527, + 0.909, + 0.644 + ], + "angle": 0, + "content": "Guaranteeing Return Types. When parsing the body of a function, the transition function of the function automata \\( A_{\\mathrm{FUN}} \\) maintains information about the declared return type and the encountered return statements (if any). \\( A_{\\mathrm{FUN}} \\) only accepts states where all return values match the declared return type and all execution paths inside the function body return, following \\( L_B \\)'s typing rules in Figure 7. If the current generated statements do not return in all execution paths, another statement is forced to be generated. Since we can always express the requested type through literals, a correct return statement can always be generated and the prefix automaton property is not violated." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.644, + 0.909, + 0.728 + ], + "angle": 0, + "content": "The described rules are straightforward to implement without violating the prefix property as all restrictions are derived only from already parsed input, e.g., the already defined identifiers or the previously declared function return type. We can therefore deduce that the statement automaton is a prefix automaton. Moreover, the automaton accepts all valid statements of \\( L_{B} \\), with the exception of well-typed expressions rejected by \\( A_{e} \\). Therefore the parsed language is a subset of \\( L_{B} \\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.737, + 0.786, + 0.754 + ], + "angle": 0, + "content": "LEMMA 6. With \\( A_M \\coloneqq A_{\\overline{s}} \\) it holds that \\( A_M \\) is a prefix automaton and \\( L(A_M) \\subseteq L_B \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.092, + 0.768, + 0.347, + 0.785 + ], + "angle": 0, + "content": "4 Extension to TypeScript" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.788, + 0.909, + 0.839 + ], + "angle": 0, + "content": "We extend our completion engine described in §3 to handle a core subset of modern TypeScript. In this section, we selectively discuss the implementation of several interesting TypeScript features. We provide a comprehensive list of supported and unsupported TypeScript features in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.848, + 0.909, + 0.915 + ], + "angle": 0, + "content": "Constant Variable Declarations. In addition to variable declaration using let, TypeScript supports constant declarations using const. This defines immutable identifiers. We thus additionally track mutability of each identifier in the type environment and disallow applying the assignment operator to immutable identifiers." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.083, + 0.496, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "171:15" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.119, + 0.907, + 0.186 + ], + "angle": 0, + "content": "Arrays. We add support for array type annotation, parsing array expressions, and reading from and assigning to array fields. In array expressions, we enforce that all array elements have the same type. Moreover, array types introduce another dimension of type nesting. Therefore we adapt the type reachability pruning heuristic to handle this additional dimension to ensure termination." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.193, + 0.91, + 0.277 + ], + "angle": 0, + "content": "**Loops.** TypeScript supports various loop constructs, including for, while, do-while, and for...of loops. 
These are implemented mostly as variations of the statement block parser. The for...of loop uniquely constrains the right-hand side of the ..of operator to an array of any type. To adapt the type search, we introduce a generic array type \\(\\bullet[\\]\\), which matches any array type. For example, both types number[] and string[] match \\(\\bullet[\\]\\) in Line 2 of Algorithm 2." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.285, + 0.91, + 0.352 + ], + "angle": 0, + "content": "Additional Operators and Types. We add several arithmetic and logic operators, such as modulo \\(\\%\\) , exact equality \\(= = =\\) , logical or \\(||\\) , and the ternary operator \\(\\text{?}\\) :. To handle these operators, we add additional edges to the type search graph. Moreover, we add support for post- and prefix operators such as -- and ++, which are only valid extensions to mutable expressions." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.359, + 0.911, + 0.526 + ], + "angle": 0, + "content": "Operator Precedence. TypeScript defines an operator precedence, which determines the implicit grouping of expressions. For example \\( 1 + 2 \\). toString() is parsed as \\( 1 + (2 \\). toString()) . We adapt our expression parsing algorithm in two places to handle operator precedences. First, in the expression automaton, we leverage the knowledge about previously parsed extensions to determine the implicit grouping and thus where the next operator is applied. For example, for state \\( q^{1} + 2 \\), the member access extension \\( n \\) is applied to 2, as opposed to \\( 1 + 2 \\). Second, we adapt the type search in Algorithm 2. Concretely, we ensure that only extensions that can be validly applied based on operator precedence are iterated over. For this, we track the operator precedence of previously parsed extensions and extensions considered during the traversal of the type graph and omit operators in Line 5 that violate operator precedence." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.534, + 0.91, + 0.601 + ], + "angle": 0, + "content": "Global Identifiers and Imports. In TypeScript, many identifiers are defined globally and available in any execution. These global identifiers are incorporated by initializing the type environment of the program automaton accordingly. Identifiers such as Math introduce additional types, which we additionally implement. We also model the import of the crypto library using require." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.609, + 0.91, + 0.692 + ], + "angle": 0, + "content": "Polymorphic Built-In Members. The TypeScript LOOKUP implementation defines a few polymorphic members for built-in types. For example, for array \\( \\times \\) of type \\( T[] \\), \\( x \\). map(f) takes a callback function \\( f \\) and returns a new array \\( [f(x[0]), f(x[1]), \\ldots] \\). If \\( f \\) has type \\( (T) => P \\), the returned array has type \\( P[] \\). Here \\( P \\) is a type parameter, which is instantiated by matching the type of the passed function to the type pattern." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.692, + 0.91, + 0.793 + ], + "angle": 0, + "content": "We support such polymorphisms by adapting the type search. We track type patterns and enforce that type parameters are instantiated before the goal type is reached. We then continue the search from the instantiated version. In the map example, when searching completions of x.map, we first search for functions that instantiate the type parameter, and then continue the search from the instantiated type. When anonymous functions are generated as call parameters, we enforce that the function matches the searched type pattern." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.8, + 0.911, + 0.918 + ], + "angle": 0, + "content": "Type Annotations. TypeScript is designed to be flexible, allowing many type annotations to be omitted when they can be automatically inferred. 
We generally support this, such as inferring types from initial values. However, it can lead to unexpected types when annotations are omitted, often confusing even experienced developers [47, 48]. Moreover, in the context of LLM-based code generation, having more type annotations can provide valuable information for both the model and our type-constraining algorithms. We have identified three situations where generated code often fails to compile without type annotations, prompting us to enforce them. First, we require" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.137, + 0.096 + ], + "angle": 0, + "content": "171:16" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.118, + 0.906, + 0.185 + ], + "angle": 0, + "content": "annotations for all function parameters and return types. Second, all variable declarations must either have a type annotation or be initialized with a value. Third, we enforce type annotations for the first parameter of anonymous functions used as callbacks in the polymorphic built-in member reduce. These constraints trade off practical correctness with theoretical language completeness." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.199, + 0.356, + 0.215 + ], + "angle": 0, + "content": "5 Experimental Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.22, + 0.906, + 0.269 + ], + "angle": 0, + "content": "We present an extensive evaluation of our type constraining approach on a variety of tasks and models. We outline our experimental setup (§5.1), evaluate the impact on compilation errors and functional correctness (§5.2), perform runtime analysis (§5.3), and present case studies (§5.4)." 
+ }, + { + "type": "title", + "bbox": [ + 0.092, + 0.284, + 0.325, + 0.3 + ], + "angle": 0, + "content": "5.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.304, + 0.906, + 0.353 + ], + "angle": 0, + "content": "We now outline our main evaluation setup, covering implementation, evaluated tasks, considered models, compared methods, and metrics. We provide further setup details and hyperparameter choices in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.363, + 0.906, + 0.412 + ], + "angle": 0, + "content": "Implementation. Our implementation is written in Python and contains 11249 lines of code. To ensure robust implementation, we built a large set of around four hundred unit tests and frequently compared the behaviors of our implementation with the official TypeScript compiler [42]." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.423, + 0.767, + 0.438 + ], + "angle": 0, + "content": "Tasks and Benchmarks. We evaluate three relevant tasks of code generation:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.444, + 0.906, + 0.475 + ], + "angle": 0, + "content": "- Synthesis: Given a natural language task description and a function header, the task is to generate a solution from scratch." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.477, + 0.906, + 0.509 + ], + "angle": 0, + "content": "- Translation: Given a function written in Python and the header of an equivalent TypeScript function, the task is to generate the body of the equivalent function in TypeScript." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.51, + 0.906, + 0.559 + ], + "angle": 0, + "content": "- Repair: Given a natural language task description, a non-compilable solution, the corresponding compiler error, and the function header, the task is to restore functionality of the flawed solution by resolving the compilation error." 
+ }, + { + "type": "list", + "bbox": [ + 0.11, + 0.444, + 0.906, + 0.559 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.564, + 0.906, + 0.68 + ], + "angle": 0, + "content": "The benchmarks for these tasks are based on TypeScript-translated tasks from HumanEval [12] and MBPP [5], contained in the MultiPL-E dataset [13], with 159 and 384 instances each. We observe that success in generating valid code for the same sample can vary depending on the random seed used. To obtain more comprehensive results on the small HumanEval dataset, we generate each sample 4 times with different seeds and aggregate the outcomes. In MBPP, we generate each sample once. For Repair, we collect all non-compiling programs from the unconstrained synthesis task for all models, resulting in 292 and 248 instances for HumanEval and MBPP each." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.69, + 0.906, + 0.788 + ], + "angle": 0, + "content": "Models. We use 6 different open-weight LLMs, covering 3 LLMs of varying parameter sizes from the same model family and 4 models of a similar size from different model families: the Gemma 2 model family with 2B/9B/27B parameters [64], DeepSeekCoder 33B (abbreviated as DSCoder 33B) [28], CodeLlama 34B [59], and Qwen2.5 32B [73]. For all evaluated LLMs we choose the instruction-tuned variants, which are fine-tuned to follow instructions in a chat-style interaction, such that they adequately attempt to resolve the presented tasks." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.799, + 0.906, + 0.914 + ], + "angle": 0, + "content": "Compared Methods. We run unconstrained LLM sampling, reported as Vanilla. We measure the upper bound improvement of prior syntactic constraining methods [8, 57, 66] by assuming that all syntactically incorrect instances generated by Vanilla could be compiled under syntactic constraining. We refer to this improvement as idealized Syntax. 
We separately sample using type-constrained decoding based on our completion engine introduced in §3 and §4, and report it as Types. Due to the size and complexity of the full TypeScript compiler, featuring over 427,105 lines of code in 698 files [42], our extension does not cover all features of TypeScript. We therefore" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.907, + 0.096 + ], + "angle": 0, + "content": "171:17" + }, + { + "type": "table_caption", + "bbox": [ + 0.088, + 0.117, + 0.907, + 0.179 + ], + "angle": 0, + "content": "Table 2. Number of instances with compiler errors in unconstrained generation (Vanilla), idealized syntax-only constraining (Syntax), and our proposed type constraining (Types). Type constraining reduces compiler errors by \\(74.8\\%\\) and \\(56.0\\%\\) in the synthesis of HumanEval and MBPP problems respectively, compared to only \\(9.0\\%\\) and \\(4.8\\%\\) ideal improvement on the two datasets respectively through syntax-only constraining." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.182, + 0.905, + 0.431 + ], + "angle": 0, + "content": "
ModelSynthesisTranslationRepair
VanillaSyntaxTypesVanillaSyntaxTypesVanillaSyntaxTypes
HumanEvalGemma 2 2B10392↓10.7%44↓57.3%177149↓15.8%80↓54.8%194181↓6.7%103↓46.9%
Gemma 2 9B4541↓8.9%13↓71.1%7563↓16.0%16↓78.7%113108↓4.4%52↓54.0%
Gemma 2 27B1513↓13.3%2↓86.7%2020↓0.0%3↓85.0%4540↓11.1%22↓51.1%
DS Coder 33B2625↓3.8%5↓80.8%1817↓5.6%7↓61.1%3636↓0.0%15↓58.3%
CodeLlama 34B8671↓17.4%28↓67.4%158124↓21.5%59↓62.7%153142↓7.2%48↓68.6%
Qwen2.5 32B1717↓0.0%2↓88.2%2421↓12.5%5↓79.2%3634↓5.6%13↓63.9%
MBPPGemma 2 2B6764↓4.5%27↓59.7%126111↓11.9%79↓37.3%194184↓5.2%108↓44.3%
Gemma 2 9B3029↓3.3%10↓66.7%6761↓9.0%33↓50.7%129124↓3.9%63↓51.2%
Gemma 2 27B2019↓5.0%7↓65.0%3736↓2.7%22↓40.5%7169↓2.8%32↓54.9%
DS Coder 33B3232↓0.0%19↓40.6%2927↓6.9%13↓55.2%9090↓0.0%43↓52.2%
CodeLlama 34B8071↓11.2%41↓48.8%126114↓9.5%54↓57.1%157148↓5.7%76↓51.6%
Qwen2.5 32B1918↓5.3%13↓31.6%2222↓0.0%16↓27.3%5552↓5.5%29↓47.3%
" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.461, + 0.909, + 0.529 + ], + "angle": 0, + "content": "emulate a type constraining that supports the entire TypeScript feature set. Concretely, if a sample compiles correctly without any constraining, we report it as-is. Otherwise, we report the result of a constrained resample. For all methods, if generation takes more than 300 seconds, we report the partial program generated until the timeout." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.537, + 0.91, + 0.622 + ], + "angle": 0, + "content": "Metrics. We compute two main metrics to assess the effectiveness of the compared methods. First, we determine the number of compiler errors in model-generated outputs. We count as a compiler error any case in which the TypeScript compiler [42] reports an issue during compilation. To measure functional correctness, we leverage the pass@1 metric [14], which measures the percentage of code generations that pass the provided unit tests given only one trial." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.635, + 0.624, + 0.651 + ], + "angle": 0, + "content": "5.2 Results on Compilation and Functional Correctness" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.656, + 0.91, + 0.723 + ], + "angle": 0, + "content": "In this section, we present our experimental results, showing that on all three code-generation-related tasks, our type constraining approach significantly improves the considered LLMs in generating both compileable and functionally correct code. It also substantially outperforms syntax-only constraining." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.732, + 0.909, + 0.881 + ], + "angle": 0, + "content": "Reduction of Compilation Errors. In Table 2, we present the number of compilation errors produced by each compared method. 
For synthesis and translation, in the unconstrained setting (Vanilla), on average only \\(9.0\\%\\) and \\(4.9\\%\\) of the non-compiling instances in HumanEval and MBPP respectively are due to syntactic errors (Syntax), with Qwen2.5 32B even making no syntax errors at all for HumanEval synthesis and MBPP translation. In contrast, type constraining reduces compilation errors by more than half, i.e., by \\(75.3\\%\\) and \\(52.1\\%\\) on HumanEval and MBPP respectively. We observe that models across all sizes and families benefit similarly from our constraining, with a minimum error reduction of \\(54.8\\%\\) and \\(27.3\\%\\) on HumanEval and MBPP respectively, highlighting the general effectiveness of our approach." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.882, + 0.909, + 0.915 + ], + "angle": 0, + "content": "A straightforward way to improve successful compilation of LLM-generated code is to feed the erroneous code and the error message back to an LLM for correction – our repair task. Thanks" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:18" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "table_caption", + "bbox": [ + 0.092, + 0.118, + 0.907, + 0.147 + ], + "angle": 0, + "content": "Table 3. pass@1 of unconstrained generation (Vanilla) and type constraining (Types). The benefit of our type-constraining approach transfers from reduced compilation errors to improved functional correctness." + }, + { + "type": "table", + "bbox": [ + 0.103, + 0.15, + 0.894, + 0.398 + ], + "angle": 0, + "content": "
ModelSynthesisTranslationRepair
VanillaTypesVanillaTypesVanillaTypes
HumanEvalGemma 2 2B29.130.2↑3.8%50.253.9↑7.5%11.620.9↑79.4%
Gemma 2 9B56.658.3↑3.1%73.778.3↑6.2%24.034.9↑45.7%
Gemma 2 27B69.571.2↑2.5%86.687.7↑1.3%38.441.1↑7.1%
DS Coder 33B68.971.1↑3.2%88.790.1↑1.6%47.650.7↑6.5%
CodeLlama 34B41.043.4↑5.7%58.663.5↑8.3%17.527.4↑56.9%
Qwen2.5 32B79.681.8↑2.8%92.193.9↑1.9%65.471.2↑8.9%
MBPPGemma 2 2B40.442.4↑5.2%52.356.0↑7.0%12.122.6↑86.7%
Gemma 2 9B65.467.4↑3.2%71.475.8↑6.2%24.231.9↑31.7%
Gemma 2 27B70.672.1↑2.2%83.184.4↑1.6%39.145.2↑15.5%
DS Coder 33B65.467.2↑2.8%85.989.1↑3.6%35.143.1↑23.0%
CodeLlama 34B42.245.6↑8.0%55.763.3↑13.6%15.726.6↑69.2%
Qwen2.5 32B76.376.6↑0.3%89.690.4↑0.9%48.054.0↑12.6%
" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.424, + 0.907, + 0.54 + ], + "angle": 0, + "content": "to its general applicability, our type constraining approach can also enhance this process. Our experimental results in the setting of code repair are also depicted in Table 2. We find that, in the vanilla setting, many models struggle to correctly localize and resolve compilation errors, with Gemma 2 2B for example repairing only \\(33.5\\%\\) and \\(25.8\\%\\) of the non-compiling HumanEval and MBPP instances, respectively. This is substantially increased to \\(56.4\\%\\) and \\(58.4\\%\\) through type constraining. On average, using type-constrained sampling, \\(53.7\\%\\) more compilation errors are resolved than using vanilla LLM decoding." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.548, + 0.909, + 0.682 + ], + "angle": 0, + "content": "Improving Functional Correctness. Programs that do not compile are always functionally incorrect. With our type constraining method, non-compilable generations can be turned into well-formed ones, offering the possibility of achieving functional correctness. In Table 3, we experimentally show that type constraining universally improves the functional correctness of LLM-generated code. On the three tasks considered, employing type constraining improves LLMs' pass@1 rate, achieving an average increase by \\(3.5\\%\\) in synthesis, \\(5.0\\%\\) in translation, and \\(37.0\\%\\) in repair tasks. The larger improvement in the latter is due to vanilla LLMs generally struggling to generate functionally correct code. One interesting phenomenon is that, for stronger models," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.683, + 0.563, + 0.78 + ], + "angle": 0, + "content": "constraints more likely lead to recovering functionally correct code. For example on the synthesis task, for Gemma 2 27B, out of the 26 instances that required resampling to compile successfully, 17 are also functionally correct. 
For Qwen2.5 32B, 15 out of 21 such instances were correct." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.795, + 0.305, + 0.812 + ], + "angle": 0, + "content": "5.3 Runtime Analysis" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.816, + 0.563, + 0.881 + ], + "angle": 0, + "content": "As discussed in §2, compared with vanilla LLM decoding, our constrained decoding algorithm runs an additional loop (Line 4 of Algorithm 1), where tokens are sampled from an LLM-produced next-token probability distribu" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.882, + 0.907, + 0.915 + ], + "angle": 0, + "content": "tion and checked against the completion engine. In this section, we investigate how this process introduces additional runtime overhead for our type constraining. Note that for each selected token," + }, + { + "type": "table_caption", + "bbox": [ + 0.578, + 0.686, + 0.907, + 0.747 + ], + "angle": 0, + "content": "Table 4. Median time per synthesis instance in seconds spent by our type-constrained decoding and its relative increase compared with unconstrained decoding (Vanilla)." + }, + { + "type": "table", + "bbox": [ + 0.579, + 0.75, + 0.905, + 0.876 + ], + "angle": 0, + "content": "
ModelHumanEvalMBPP
Gemma 2 2B6.7↑38.3%6.3↑35.4%
Gemma 2 9B8.3↑29.2%9.5↑46.8%
Gemma 2 27B11.7↑19.9%11.7↑32.8%
DS Coder 33B11.5↑36.2%9.4↑59.5%
CodeLlama 34B7.6↑40.8%7.0↑37.6%
Qwen2.5 32B7.3↑39.6%4.9↑54.8%
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.091, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:19" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.119, + 0.909, + 0.153 + ], + "angle": 0, + "content": "vanilla and constrained decoding both run LLM inference only once, meaning that there is no extra overhead from LLM inference in constrained decoding." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.163, + 0.911, + 0.314 + ], + "angle": 0, + "content": "Overhead of Type Constraining. For an application of our method in practice, the effective runtime increase due to constrained decoding is highly relevant. To assess it, we measure the runtime per synthesis instance in HumanEval and MBPP for both unconstrained and type-constrained decoding. We report the median runtime per instance for type constraining and its relative increase to unconstrained decoding in Table 4. On average over the evaluated models, we observe a relative increase of \\(39.1\\%\\) and \\(52.1\\%\\) in HumanEval and MBPP respectively. We consider this impact to be bearable for the observed significant decrease in compilation errors. Moreover, this is measured on an unoptimized, Python-based implementation and could be significantly improved by a more system-oriented implementation, such as the one proposed by Dong et al. [18]." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.322, + 0.568, + 0.554 + ], + "angle": 0, + "content": "Number of Sample-and-Check Loop Iterations. To provide an in-depth analysis of the overhead of our type constraining method, we measure the number of iterations spent by the sample-and-check loop to find an admissible token. The results are provided in Figure 8. We observe that the number of loop iterations follows a long-tail distribution. 
For \\(99.4\\%\\) of cases, only one loop iteration is needed. This number is even higher for stronger models, with Gemma 2 9B and 27B requiring one iteration in \\(99.6\\%\\) and \\(99.9\\%\\) of cases, respectively. This means that, in most instances, LLMs can generate a valid token on the first attempt, which is then verified by the completion engine. In cases where more than one iteration is needed, the completion engine intervenes to guide the selection" + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.328, + 0.9, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.476, + 0.912, + 0.552 + ], + "angle": 0, + "content": "Figure 8. Histogram on the number of iterations consumed by the sample-and-check loop at Line 4 of Algorithm 1 to find a valid token, measured with Gemma 2 2B for HumanEval synthesis." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.555, + 0.91, + 0.587 + ], + "angle": 0, + "content": "of valid tokens. These interventions help resolve errors in many instances in our benchmarks, providing significant benefit, as discussed in §5.2." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.588, + 0.912, + 0.738 + ], + "angle": 0, + "content": "Prior work [8, 57, 66] implemented constrained decoding differently than Algorithm 1. Instead of running the sample-and-check loop, they execute the completion engine for all tokens in the LLM's vocabulary, mask out all invalid tokens, and sample once from the remaining valid tokens based on their normalized likelihoods. This implementation is less efficient than ours, especially when calling the completion engine is costly. Based on the long-tail distribution depicted in Figure 8, our implementation requires only a single invocation to the completion engine for most cases, whereas their implementation calls it for every token in the LLM's vocabulary for all cases. 
In our experiments, their implementation results in timeouts for all benchmark instances, with a timeout of 5 minutes per instance. This justifies our choice of using the sample-and-check loop." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.752, + 0.248, + 0.768 + ], + "angle": 0, + "content": "5.4 Case Study" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.772, + 0.91, + 0.824 + ], + "angle": 0, + "content": "For a qualitative evaluation, we manually inspect instances where unconstrained decoding fails and our type constraining approach successfully corrects errors. We find that our technique effectively amends various types of compilation errors. We showcase three such examples in Figure 9." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.832, + 0.911, + 0.916 + ], + "angle": 0, + "content": "Wrong Parameter Count. In Figure 9a, the task is to find all words in a string that contain a number of consonants. When Gemma 2 2B attempts to solve this problem, translating from the Python version to TypeScript, it calls split without arguments. This is allowed in Python, but not in TypeScript, where a single string argument is required. Type constraining correctly detects this and guides the generation to sample a space character as parameter." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.135, + 0.095 + ], + "angle": 0, + "content": "171:20" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.084, + 0.907, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "table", + "bbox": [ + 0.099, + 0.116, + 0.904, + 0.339 + ], + "angle": 0, + "content": "
VanillaTypes
(a)// find all words with consonants function select_words(s: string): string[] { for (const word of s.split()// find all words with consonants function select_words(s: string): string[] { for (const word of s.split(' ')
(b)function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } // No return }function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } return 1; }
(c)// insert a delimiter between every element return nums.reduce( (acc, curr, index) => { acc.push(curr); return acc; }, [])// insert a delimiter between every element return nums.reduce( (acc: number[] , curr, index) => { acc.push(curr); return acc; }, [])
" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.346, + 0.907, + 0.456 + ], + "angle": 0, + "content": "Figure 9. Three examples illustrating the effect of type-constrained sampling. Left are unconstrained generations with problematic tokens highlighted in red, and right are type-constrained results with corrected tokens highlighted in green, adapted for clarity. In (a), Gemma 2 2B attempts to call split, missing required arguments. In (b), DeepSeekCoder 33B attempts to complete a function without a guaranteed return. The issue is resolved by forcing generation of another statement after the main loop. In (c), Gemma 2 9B calls reduce with an anonymous function without type annotation. This leads to an incorrect type inference for the first parameter. The issue is solved by guiding the model to add type annotation." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.491, + 0.906, + 0.59 + ], + "angle": 0, + "content": "Missing Return Statement. In Figure 9b, to complete function largest_divisor, the model must compute a straightforward divisor loop. DeepSeekCoder 33B Instruct [28] implements a correct loop, but does not guarantee returning a value in every execution path. When the return statement in the loop is never executed, e.g., for negative inputs, the function thus returns undefined, violating the type rules. Our method detects this issue and forces the generation of another statement in the function body, resulting in a correct fallback return statement." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.602, + 0.906, + 0.751 + ], + "angle": 0, + "content": "Incorrect Type Inference. In Figure 9c, the task is to insert a delimiter between every element in an array. Gemma 2 9B solves this with the reduce function. This generic function accepts two arguments; first, a callback function that is called consecutively for every element in the array and accumulates a result, second, an initial value for the callback function. 
The type of the accumulator of the callback is derived implicitly from the second argument, which is an empty array in the given example. TypeScript infers special type never[] for the empty array, disallowing inserting curr of type number through push. Therefore, the program fails to compile. This issue is a well-known limitation of the TypeScript compiler, often confusing even expert developers [47, 48]. Our method resolves it by enforcing adequate type annotation on the first argument of the callback function." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.767, + 0.226, + 0.781 + ], + "angle": 0, + "content": "6 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.787, + 0.909, + 0.819 + ], + "angle": 0, + "content": "Our general type constraining approach, backed by strong experimental results, opens exciting avenues for future research, which we discuss below." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.832, + 0.91, + 0.915 + ], + "angle": 0, + "content": "Implementation Effort. Developing a completion engine for a target programming language currently requires manual efforts. However, we expect that the involved effort to adopt our method to other languages will be reduced significantly, as many features transfer from our implementation for \\( L_{B} \\) and TypeScript. Moreover, we believe that, due to the huge impact on LLM's code generation, the effort will pay off. 
Future programming language developers may consider generally writing" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.906, + 0.097 + ], + "angle": 0, + "content": "171:21" + }, + { + "type": "code", + "bbox": [ + 0.096, + 0.122, + 0.901, + 0.186 + ], + "angle": 0, + "content": "function sort_threel(number[],r:number[]):number[]{ \nfor(let \\(\\mathrm{i} = 0\\) ;i<1.length; \\(\\mathrm{i + + }\\) ){ \nr.push(l[i].toString().slice(0,3).concat(l[i].ToString().split())'.split').reverse() .join(')).split''.reverse().join('').ToString() \\(^+\\) l[i].ToString().slice(3).split')…" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.191, + 0.911, + 0.24 + ], + "angle": 0, + "content": "Figure 10. Complications errors remain when the model does not terminate after a corrected token. In this example for synthesis on the HumanEval task #33, CodeLlama 34B is steered away from accessing non-existing member .sort and instead accesses .string." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.274, + 0.908, + 0.309 + ], + "angle": 0, + "content": "their compilers as an incremental completion engine, which additionally enables automatic adoption for constrained code generation, besides conventional grammar parsing and type checking." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.318, + 0.91, + 0.568 + ], + "angle": 0, + "content": "Broader Application to More Complex Tasks and Stronger LLMs. Stronger LLMs, such as the latest OpenAI models [33], may make fewer typing errors on the HumanEval and MBPP datasets. Our evaluation results in Table 2 also demonstrate that compilation errors decrease with increasing model size for the Gemma family. 
However, recent findings showed that currently, even the strongest LLMs struggle with generating compilable code for more complex coding tasks, stricter typing rules, and low-resource languages (e.g., new DSLs). Gusanidas [29] evaluated various state-of-the-art LLMs on difficult code synthesis tasks in Rust, reporting compilation error rates of \\(18\\%\\) for OpenAI o1-mini [33], \\(39\\%\\) for DeepSeek R1 [15] and \\(27\\%\\) for Anthropic's Claude 3.5 Sonnet [2]. For OCaml and Haskell, which are sparsely represented in LLMs' training data, the error rate is even higher at \\(40\\% - 60\\%\\) for all models, matching a trend of worse performance on low-resource languages [24, 36]. Pan et al. [54] compiled a large dataset of code translation and found \\(44.3\\%\\) of GPT-4-generated code to contain compilation errors. Similarly, Shetty et al. [61] report around \\(25\\%\\) compilation errors for C-to-Rust translation using OpenAI o1 models. Our type constraining approach is broadly applicable to all these scenarios and our work presents a promising proof of concept. Future work can consider building upon our approach to address these challenges." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.568, + 0.91, + 0.651 + ], + "angle": 0, + "content": "Constrained decoding in general requires access to the next-token probability distributions produced by LLMs. Currently, commercially available black-box LLM APIs only return sampled tokens and do not offer complete next-token distributions. A possible solution is to integrate our method into the backend of model providers, as was recently implemented for guaranteeing adherence to JSON Schemas [3, 50]." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.662, + 0.911, + 0.78 + ], + "angle": 0, + "content": "Remaining Compiler Errors. 
We observe that, even though constrained decoding guarantees a valid result upon termination, a considerable amount of compilation errors remain due to non-termination within the token or time limit. We find this to be caused by generation loops, entered when generation is amended by constraints and the LLM is unable to recover. An example is depicted in Figure 10, where CodeLlama 34B tries to access the invalid member sort on an expression of type number. Future work may add additional constraints to force stopping such unconstructive loops and steer the model more strictly, e.g., by limiting the complexity of generated expressions." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.794, + 0.258, + 0.809 + ], + "angle": 0, + "content": "7 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.815, + 0.91, + 0.916 + ], + "angle": 0, + "content": "Code Language Models. Recently, LLMs have gained traction for diverse coding tasks such as code synthesis, repair, or translation [35]. These models are typically trained on datasets containing billions to trillions of tokens and have billions of parameters, with both factors contributing to improved performance in code-related benchmarks [28, 46, 59, 64]. Meanwhile, LLMs are well known to frequently make mistakes [32, 58], and, as we show in this work, even state-of-the-art open-weight models with over 30 billion parameters frequently make errors in code generation." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:22" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.118, + 0.909, + 0.268 + ], + "angle": 0, + "content": "Improving Language Model Accuracy. 
Apart from constrained decoding, three primary approaches have been proposed to enhance the accuracy of language models on code tasks: fine-tuning, retrieval augmentation (RAG), and compiler or execution feedback. Fine-tuning adapts the model weights based on specifically collected training data. This process is highly resource intensive [65, 70]. RAG provides the model with additional context based on a database or related code snippets [6, 57]. Compiler and execution feedback is only available after completing the model generation and requires resampling [16, 34, 69]. However, constrained decoding is orthogonal to these methods and, as indicated by Poesia et al. [57] and our experimental results, combining constrained decoding with RAG or compiler feedback additionally improves model performance." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.276, + 0.909, + 0.409 + ], + "angle": 0, + "content": "Constrained Decoding. Prior work on constrained decoding failed to achieve strong results due to its limitation to syntactic language features. Constraining to context-free languages has been explored extensively in recent work [7, 8, 57, 71]. Simple context-sensitive syntactic features, such as the space indentation in Python and the scope markers in Go have also been implemented [41, 66]. As demonstrated in §5, however, syntax errors on average account for only \\(6\\%\\) of compilation errors in recent code models. The rarity of syntax errors significantly reduces the potential of leveraging them for improvements in code correctness. Meanwhile, our type-constrained decoding more than halved compilation errors." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.418, + 0.909, + 0.583 + ], + "angle": 0, + "content": "Type Systems for Code Synthesis. Previous work that leveraged type systems for code synthesis was confined to specialized settings and unable to constrain general, complex program generation. Poesia et al. 
[57] proposed using known column names to guide SQL query generation. Gvero et al. [30] employed a search on the type graph for function call completion. Agrawal et al. [1] leverage language-server-generated type annotations for object member accesses. Blinn et al. [11] use language-server-derived type information to provide additional context to the LLM, but not to enforce hard constraints. Additionally, type constraints have been used to direct code synthesis based on specialized search procedures [22, 56, 69]. However, these methods are not compatible with LLM-based code generation. This limits their ability to exploit the powerful natural language and general-purpose capabilities of LLMs." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.597, + 0.23, + 0.612 + ], + "angle": 0, + "content": "8 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.617, + 0.909, + 0.766 + ], + "angle": 0, + "content": "In this work, we explored how type systems in programming languages can be used to guide language models during decoding. Concretely, we design and implement prefix automata to perform type constraining for a foundational simply typed language and then extend it to the popular language TypeScript. We extensively evaluate the impact of using such constraints for code synthesis, translation, and repair and observe that we more than halve compilation errors on a diverse set of models and consistently increase functional correctness. We further explore qualitatively how the constraining positively impacts code generation. We conclude that such type constraining should be implemented for more programming languages, and has the potential to generally improve code generation in many domains." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:23" + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.119, + 0.281, + 0.137 + ], + "angle": 0, + "content": "Artifact Availability" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.139, + 0.908, + 0.175 + ], + "angle": 0, + "content": "The artifact for this paper, including source code, datasets, and reproductions scripts, is available on GitHub (https://github.com/eth-sri/type-constrained-code-generation) and Zenodo [45]." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.188, + 0.281, + 0.205 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.208, + 0.908, + 0.243 + ], + "angle": 0, + "content": "We would like to thank the anonymous reviewers for their in-depth and constructive feedback, and the artifact reviewers for their feedback on our artifact accessibility." + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.257, + 0.199, + 0.273 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.277, + 0.909, + 0.319 + ], + "angle": 0, + "content": "[1] Lakshya Agrawal, Aditya Kanade, Navin Goyal, Shuvendu K Lahiri, and Sriram Rajamani. 2023. Monitor-Guided Decoding of Code LMs with Static Analysis of Repository Context. In NeurIPS. https://openreview.net/forum?id=qPUbKxKvXq" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.319, + 0.908, + 0.347 + ], + "angle": 0, + "content": "[2] Anthropic. [n.d.]. Claude 3 Model Card. https://assets.anthropic.com/m/61e7d27f8c8f5919/original/Claude-3-ModelCard.pdf Accessed: March 10, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.347, + 0.909, + 0.373 + ], + "angle": 0, + "content": "[3] Anthropic. 2025. JSON Mode. 
https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode Accessed: March 10, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.373, + 0.61, + 0.388 + ], + "angle": 0, + "content": "[4] Ken Arnold and James Gosling. 1996. The Java Programming Language." + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.388, + 0.909, + 0.43 + ], + "angle": 0, + "content": "[5] Jacob Austin, Augustus Odena, Maxwell I. Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie J. Cai, Michael Terry, Quoc V. Le, et al. 2021. Program Synthesis with Large Language Models. arXiv Preprint (2021). https://arxiv.org/abs/2108.07732" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.43, + 0.909, + 0.457 + ], + "angle": 0, + "content": "[6] Nastaran Bassamzadeh and Chhaya Methani. 2024. A Comparative Study of DSL Code Generation: Fine-Tuning vs. Optimized Retrieval Augmentation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2407.02742" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.457, + 0.909, + 0.485 + ], + "angle": 0, + "content": "[7] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2023. Prompting Is Programming: A Query Language for Large Language Models. PLDI (2023). https://doi.org/10.1145/3591300" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.485, + 0.909, + 0.512 + ], + "angle": 0, + "content": "[8] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2024. Guiding LLMs The Right Way: Fast, Non-Invasive Constrained Generation. In ICML. https://openreview.net/forum?id=pXaEYzrFae" + }, + { + "type": "ref_text", + "bbox": [ + 0.099, + 0.512, + 0.909, + 0.54 + ], + "angle": 0, + "content": "[9] Satwik Bhattachamishra, Kabir Ahuja, and Navin Goyal. 2020. On the Ability and Limitations of Transformers to Recognize Formal Languages. In EMNLP. 
https://doi.org/10.18653/v1/2020.emnlp-main.576" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.54, + 0.801, + 0.554 + ], + "angle": 0, + "content": "[10] Gavin M. Bierman, Martin Abadi, and Mads Torgersen. 2014. Understanding TypeScript. In ECOOP." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.554, + 0.909, + 0.582 + ], + "angle": 0, + "content": "[11] Andrew Blinn, Xiang Li, June Hyung Kim, and Cyrus Omar. 2024. Statically Contextualizing Large Language Models with Typed Holes. OOPSLA (2024). https://doi.org/10.1145/3689728" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.582, + 0.909, + 0.623 + ], + "angle": 0, + "content": "[12] Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language Models are Few-Shot Learners. In NeurIPS. https://proceedings.neurips.cc/paper/2020/bash/1457c0d6bfcb4967418bf8ac142f64a-Abstract.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.623, + 0.909, + 0.665 + ], + "angle": 0, + "content": "[13] Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q. Feldman, et al. 2023. MultiPL-E: A Scalable and Polyglot Approach to Benchmarking Neural Code Generation. IEEE Trans. Software Eng. (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.664, + 0.909, + 0.707 + ], + "angle": 0, + "content": "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Pondé de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating Large Language Models Trained on Code. arXiv Preprint (2021). 
https://arxiv.org/abs/2107.03374" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.707, + 0.909, + 0.748 + ], + "angle": 0, + "content": "[15] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, et al. 2025. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.12948" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.748, + 0.909, + 0.789 + ], + "angle": 0, + "content": "[16] Pantazis Deligiannis, Akash Lal, Nikita Mehrotra, Rishi Poddar, and Aseem Rastogi. 2025. RustAssistant: Using LLMs to Fix Compilation Errors in Rust Code. In ICSE. https://www.microsoft.com/en-us/research/publication/rustassistant-using-llms-to-fix-compiler-errors-in-rust-code/" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.789, + 0.909, + 0.817 + ], + "angle": 0, + "content": "[17] TypeScript Developers. [n.d.]. TypeScript: Documentation – More on Functions. https://www.typescriptlang.org/docs/handbook/2/functions.html#function-type-expressions Accessed: March 10, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.817, + 0.909, + 0.858 + ], + "angle": 0, + "content": "[18] Yixin Dong, Charlie F. Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. 2024. XGrammar: Flexible and Efficient Structured Generation Engine for Large Language Models. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2411.15100" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.858, + 0.675, + 0.873 + ], + "angle": 0, + "content": "[19] Alan AA Donovan and Brian W Kernighan. 2015. The Go programming language." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.873, + 0.909, + 0.915 + ], + "angle": 0, + "content": "[20] Shihan Dou, Haoxiang Jia, Shenxi Wu, Huiyuan Zheng, Weikang Zhou, Muling Wu, Mingxu Chai, Jessica Fan, Caishuang Huang, Yunbo Tao, et al. 2024. 
What's Wrong with Your Code Generated by Large Language Models? An Extensive Study. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2407.06153" + }, + { + "type": "list", + "bbox": [ + 0.092, + 0.277, + 0.909, + 0.915 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:24" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.12, + 0.909, + 0.149 + ], + "angle": 0, + "content": "[21] Javid Ebrahimi, Dhruv Gelda, and Wei Zhang. 2020. How Can Self-Attention Networks Recognize Dyck-n Languages?. In EMNLP. https://aclanthology.org/2020-findings-emnlp.384/" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.149, + 0.908, + 0.177 + ], + "angle": 0, + "content": "[22] Jonás Fiala, Shachar Itzhaky, Peter Müller, Nadia Polikarpova, and Ilya Sergey. 2023. Leveraging Rust Types for Program Synthesis. PLDI (2023). https://doi.org/10.1145/3591278" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.177, + 0.907, + 0.203 + ], + "angle": 0, + "content": "[23] Zheng Gao, Christian Bird, and Earl T. Barr. 2017. To type or not to type: quantifying detectable bugs in JavaScript. In ICSE. https://doi.org/10.1109/ICSE.2017.75" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.204, + 0.907, + 0.232 + ], + "angle": 0, + "content": "[24] Alessandro Giagnorio, Alberto Martin-Lopez, and Gabriele Bavota. 2025. Enhancing Code Generation for Low-Resource Languages: No Silver Bullet. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.19085" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.232, + 0.476, + 0.245 + ], + "angle": 0, + "content": "[25] GitHub. [n.d.]. 
https://github.com/features/copilot" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.246, + 0.889, + 0.26 + ], + "angle": 0, + "content": "[26] GitHub. 2022. The top programming languages. https://octoverse.github.com/2022/top-programming-languages" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.26, + 0.908, + 0.3 + ], + "angle": 0, + "content": "[27] Aaron Grattaflori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The Llama 3 Herd of Models. ArXiv Preprint (2024). https://arxiv.org/abs/2407.21783" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.301, + 0.908, + 0.342 + ], + "angle": 0, + "content": "[28] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Y. Wu, Y. K. Li, et al. 2024. DeepSeek-Coder: When the Large Language Model Meets Programming - The Rise of Code Intelligence. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2401.14196" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.343, + 0.907, + 0.37 + ], + "angle": 0, + "content": "[29] Gusanidas. [n.d.]. Compilation Benchmark. https://github.com/Gusanidas/compilation-benchmark Accessed: March 10, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.371, + 0.907, + 0.398 + ], + "angle": 0, + "content": "[30] Tihomir Gvero, Viktor Kuncak, Ivan Kuraj, and Ruzica Piskac. 2013. Complete completion using types and weights. In PLDI. https://doi.org/10.1145/2491956.2462192" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.398, + 0.855, + 0.412 + ], + "angle": 0, + "content": "[31] John E. Hopcroft and Jeffrey D. Ullman. 1979. Introduction to Automata Theory, Languages and Computation." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.412, + 0.908, + 0.453 + ], + "angle": 0, + "content": "[32] Lei Huang, Wejiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2023. A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2311.05232" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.453, + 0.908, + 0.493 + ], + "angle": 0, + "content": "[33] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. OpenAI o1 System Card. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.16720" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.494, + 0.907, + 0.536 + ], + "angle": 0, + "content": "[34] Prithwish Jana, Piyush Jha, Haoyang Ju, Gautham Kishore, Aryan Mahajan, and Vijay Ganesh. 2024. CoTran: An LLM-Based Code Translator Using Reinforcement Learning with Feedback from Compiler and Symbolic Execution. In ECAI (Frontiers in Artificial Intelligence and Applications). https://doi.org/10.3233/FAIA240968" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.536, + 0.907, + 0.564 + ], + "angle": 0, + "content": "[35] Juyong Jiang, Fan Wang, Jiasi Shen, Sungju Kim, and Sunghun Kim. 2024. A Survey on Large Language Models for Code Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.00515" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.564, + 0.907, + 0.592 + ], + "angle": 0, + "content": "[36] Sathvik Joel, Jie JW Wu, and Fatemeh H. Fard. 2024. Survey on Code Generation for Low resource and Domain Specific Programming Languages. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2410.03981" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.592, + 0.908, + 0.633 + ], + "angle": 0, + "content": "[37] Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi, Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, et al. 2024. StarCoder 2 and The Stack v2: The Next Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2402.19173" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.633, + 0.706, + 0.647 + ], + "angle": 0, + "content": "[38] Madnight. 2024. GitHub 2.0. https://madnight.github.io/git/#/pull_requestes/2024/1" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.648, + 0.908, + 0.674 + ], + "angle": 0, + "content": "[39] Harry G. Mairson. 2004. Linear lambda calculus and PTIME-completeness. J. Funct. Program. (2004). https://doi.org/10.1017/S0956796804005131" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.674, + 0.781, + 0.688 + ], + "angle": 0, + "content": "[40] Nicholas D Matsakis and Felix S Klock. 2014. The rust language. ACM SIGAda Ada Letters (2014)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.689, + 0.908, + 0.73 + ], + "angle": 0, + "content": "[41] Daniel Melcer, Nathan Fulton, Sanjay Krishna Gouda, and Haifeng Qian. 2024. Constrained Decoding for Fill-in-the-Middle Code Language Models via Efficient Left and Right Quotienting of Context-Sensitive Grammars. (2024). https://arxiv.org/abs/2402.17988" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.73, + 0.907, + 0.756 + ], + "angle": 0, + "content": "[42] Microsoft. 2024. TypeScript. https://github.com/microsoft/TypeScript. Accessed on November 9, 2024, commit #ef802b1." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.758, + 0.907, + 0.785 + ], + "angle": 0, + "content": "[43] John C. MITCHELL. 1990. Type Systems for Programming Languages. In Formal Models and Semantics. 
https://www.sciencedirect.com/science/article/pii/B9780444880741500135" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.786, + 0.908, + 0.827 + ], + "angle": 0, + "content": "[44] Niklas Muennighoff, Qian Liu, Armel Randy Zebaze, Qinkai Zheng, Binyuan Hui, Terry Yue Zhuo, Swayam Singh, Xiangru Tang, Leandro von Werra, and Shayne Longpre. 2024. OctoPack: Instruction Tuning Code Large Language Models. In ICLR. https://openreview.net/forum?id=mw1PWNSWZP" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.828, + 0.907, + 0.855 + ], + "angle": 0, + "content": "[45] Niels Mündler, Jingxuan He, Hao Wang, Koushik Sen, Dawn Song, and Martin Vechev. 2025. Reproduction Package for \"Type-Constrained Code Generation with Language Models\". doi:10.5281/zenodo.15355889" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.855, + 0.908, + 0.895 + ], + "angle": 0, + "content": "[46] Niels Mündler, Mark Niklas Müller, Jingxuan He, and Martin Vechev. 2024. SWT-Bench: Testing and Validating Real-World Bug-Fixes with Code Agents. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/bit/94f093b41fc2666376fb1f667fe282f3-AbsAbstract-Conference.html" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.12, + 0.909, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.084, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.906, + 0.096 + ], + "angle": 0, + "content": "171:25" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.12, + 0.908, + 0.147 + ], + "angle": 0, + "content": "[47] nielstron. 2024. Incorrect type deducted for accumulator in reduce. https://github.com/microsoft/TypeScript/issues/59999." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.847, + 0.162 + ], + "angle": 0, + "content": "[48] nop33. 2024. Wrong inferred initial value in reduce. 
https://github.com/microsoft/TypeScript/issues/59863." + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.163, + 0.83, + 0.176 + ], + "angle": 0, + "content": "[49] OpenAI. 2023. GPT-4 Technical Report. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2303.08774" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.178, + 0.909, + 0.202 + ], + "angle": 0, + "content": "[50] OpenAI. 2025. Structured Outputs. https://platform.openai.com/docs/guides/structured-outputs Accessed: March 10, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.204, + 0.908, + 0.245 + ], + "angle": 0, + "content": "[51] Gabriel Orlanski, Kefan Xiao, Xavier Garcia, Jeffrey Hui, Joshua Howland, Jonathan Malmaud, Jacob Austin, Rishabh Singh, and Michele Catasta. 2023. Measuring the Impact of Programming Language Distribution. In ICML. https://proceedings.mlr.press/v202/orlanski23a.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.247, + 0.771, + 0.259 + ], + "angle": 0, + "content": "[52] oxc project. 2024. oxc - The Javascript Oxidation Compiler. https://github.com/oxc-project/oxc." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.261, + 0.907, + 0.3 + ], + "angle": 0, + "content": "[53] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.302, + 0.907, + 0.342 + ], + "angle": 0, + "content": "[54] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. 
https://doi.org/10.1145/3597503.3639226" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.344, + 0.908, + 0.383 + ], + "angle": 0, + "content": "[55] Hammond Pearce, Baleegh Ahmad, Benjamin Tan, Brendan Dolan-Gavitt, and Ramesh Karri. 2022. Asleep at the Keyboard? Assessing the Security of GitHub Copilot's Code Contributions. In S&P. https://doi.org/10.1109/SP46214.2022.9833571" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.385, + 0.907, + 0.41 + ], + "angle": 0, + "content": "[56] Daniel Perelman, Sumit Gulwani, Thomas Ball, and Dan Grossman. 2012. Type-directed completion of partial expressions. In PLDI. https://doi.org/10.1145/2254064.2254098" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.412, + 0.908, + 0.451 + ], + "angle": 0, + "content": "[57] Gabriel Poesia, Alex Polozov, Vu Le, Ashish Tiwari, Gustavo Soares, Christopher Meek, and Sumit Gulwani. 2022. Synchronesh: Reliable Code Generation from Pre-trained Language Models. In ICLR. https://openreview.net/forum?id=KmtVD97J43e" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.453, + 0.908, + 0.479 + ], + "angle": 0, + "content": "[58] Vipula Rawte, Amit P. Sheth, and Amitava Das. 2023. A Survey of Hallucination in Large Foundation Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2309.05922" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.481, + 0.909, + 0.522 + ], + "angle": 0, + "content": "[59] Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, et al. 2023. Code Llama: Open Foundation Models for Code. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.12950" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.523, + 0.907, + 0.55 + ], + "angle": 0, + "content": "[60] Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In ACL. 
https://doi.org/10.18653/v1/p16-1162" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.551, + 0.907, + 0.577 + ], + "angle": 0, + "content": "[61] Manish Shetty, Naman Jain, Adwait Godbole, Sanjit A. Seshia, and Koushik Sen. 2024. Syzygy: Dual Code-Test C to (safe) Rust Translation using LLMs and Dynamic Analysis. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.14234" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.579, + 0.907, + 0.604 + ], + "angle": 0, + "content": "[62] Vince Szabo, Dominik Winterer, and Zhendong Su. 2024. Compilation Quotient (CQ): A Metric for the Compilation Hardness of Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.04778" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.606, + 0.908, + 0.646 + ], + "angle": 0, + "content": "[63] Florian Tambon, Arghavan Moradi Dakhel, Amin Nikanjam, Foutse Khomh, Michel C. Desmarais, and Giuliano Antoniol. 2025. Bugs in large language models generated code: an empirical study. Empir. Softw. Eng. (2025). https://doi.org/10.1007/s10664-025-10614-4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.647, + 0.907, + 0.688 + ], + "angle": 0, + "content": "[64] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. 2024. Gemma 2: Improving Open Language Models at a Practical Size. arXiv Preprint (2024). https://arxiv.org/abs/2408.00118" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.689, + 0.907, + 0.715 + ], + "angle": 0, + "content": "[65] Yun-Da Tsai, Mingjie Liu, and Haoxing Ren. 2024. Code Less, Align More: Efficient LLM Fine-tuning for Code Generation with Data Pruning. (2024). 
https://doi.org/10.48550/arXiv.2407.05040" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.717, + 0.907, + 0.744 + ], + "angle": 0, + "content": "[66] Shubham Ugare, Tarun Suresh, Hangoo Kang, Sasa Misailovic, and Gagandeep Singh. 2024. SynCode: LLM Generation with Grammar Augmentation. ArXiv Preprint (2024). https://arxiv.org/abs/2403.01632" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.745, + 0.907, + 0.771 + ], + "angle": 0, + "content": "[67] Pawel Urzyczyn. 1997. Inhabitation in Typed Lambda-Calculi (A Syntactic Approach). In TLCA (Lecture Notes in Computer Science). https://doi.org/10.1007/3-540-62688-3_47" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.772, + 0.907, + 0.798 + ], + "angle": 0, + "content": "[68] Heidi Vella. 2024. Google turns to AI to write new code; Workforce reduced. https://aibusiness.com/data/google-turns-to-ai-to-write-new-code-workforce-reduced" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.8, + 0.907, + 0.827 + ], + "angle": 0, + "content": "[69] Yuxiang Wei, Chunqiu Steven Xia, and Lingming Zhang. 2023. Copiloting the Copilots: Fusing Large Language Models with Completion Engines for Automated Program Repair. In ESEC/FSE. https://doi.org/10.1145/3611643.3616271" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.828, + 0.908, + 0.867 + ], + "angle": 0, + "content": "[70] Martin Weyssow, Xin Zhou, Kisub Kim, David Lo, and Houari A. Sahraoui. 2023. Exploring Parameter-Efficient Fine-Tuning Techniques for Code Generation with Large Language Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.10462" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.869, + 0.908, + 0.896 + ], + "angle": 0, + "content": "[71] Brandon T. Willard and Rémi Louf. 2023. Efficient Guided Generation for Large Language Models. arXiv Preprint (2023). 
https://doi.org/10.48550/arXiv.2307.09702" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.12, + 0.909, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:26" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.909, + 0.099 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "ref_text", + "bbox": [ + 0.09, + 0.12, + 0.912, + 0.161 + ], + "angle": 0, + "content": "[72] Andy Yang, David Chiang, and Dana Angluin. 2024. Masked Hard-Attention Transformers Recognize Exactly the Star-Free Languages. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/bit/13d7f172259b11b230cc5da8768abc5f-AAbstract-Conference.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.09, + 0.162, + 0.911, + 0.19 + ], + "angle": 0, + "content": "[73] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 Technical Report. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.15115" + }, + { + "type": "ref_text", + "bbox": [ + 0.09, + 0.19, + 0.911, + 0.232 + ], + "angle": 0, + "content": "[74] Quanjun Zhang, Chunrong Fang, Yang Xie, Yuxiang Ma, Weisong Sun, Yun Yang, and Zhenyu Chen. 2024. A Systematic Literature Review on Large Language Models for Automated Program Repair. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2405.01466" + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.12, + 0.912, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.249, + 0.492, + 0.265 + ], + "angle": 0, + "content": "A Detailed Prefix Automaton Definitions" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.269, + 0.905, + 0.288 + ], + "angle": 0, + "content": "In this section, we provide more detailed definitions and analysis of the various automata for \\(L_{B}\\)." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.3, + 0.287, + 0.315 + ], + "angle": 0, + "content": "A.1 Base Automata" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.32, + 0.91, + 0.355 + ], + "angle": 0, + "content": "We now provide detailed definitions for the base prefix automata introduced at the end of §3.2: union, concatenation, Kleene-Star, and terminal." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.362, + 0.908, + 0.397 + ], + "angle": 0, + "content": "Union. For the union \\( A_X \\cup A_Y \\), we define the resulting sets of initial states and accepting states as \\( I \\coloneqq I_X \\cup I_Y \\) and \\( F \\coloneqq F_X \\cup F_Y \\), respectively. The transition function is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.402, + 0.632, + 0.446 + ], + "angle": 0, + "content": "\\[\n\\delta (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.451, + 0.908, + 0.487 + ], + "angle": 0, + "content": "To show that the language parsed by this automaton is indeed the union \\( L(A_{X} \\cup A_{Y}) = L(A_{X}) \\cup L(A_{Y}) \\), we employ a short helper lemma, which can be shown inductively." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.494, + 0.91, + 0.532 + ], + "angle": 0, + "content": "LEMMA 7. 
The set of the reachable states from a set of states \\(\\mathbf{q}\\) is equal to the union of reachable states from each state in \\(\\mathbf{q}\\), i.e., \\(\\gamma (\\mathbf{q},s) = \\bigcup_{q\\in \\mathbf{q}}\\gamma (q,s)\\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.537, + 0.91, + 0.587 + ], + "angle": 0, + "content": "Since the states are distinct and we merely combine the transition functions of both automata, using the lemma, we can quickly see that the language parsed is indeed the union. Moreover, if both \\( A_{X} \\) and \\( A_{Y} \\) are prefix automata, this also holds for \\( A_{X} \\cup A_{Y} \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.596, + 0.908, + 0.629 + ], + "angle": 0, + "content": "Concatenation. For the concatenation automaton \\( A_{XY} \\), we define \\( I \\coloneqq I_X \\), \\( F \\coloneqq F_Y \\), and the transition function as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.287, + 0.632, + 0.709, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\delta_ {X Y} (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\backslash F _ {X} \\\\ \\delta_ {X} (q, c) \\cup \\delta_ {Y} (I _ {Y}, c) & \\text {i f} q \\in F _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.699, + 0.908, + 0.782 + ], + "angle": 0, + "content": "Informally, concatenation preserves the parsing behavior of both \\( A_{X} \\) and \\( A_{Y} \\) in their respective states. When \\( A_{XY} \\) reaches an accepting state of \\( A_{X} \\) and receives another input character, it either remains in \\( A_{X} \\) or transitions to \\( A_{Y} \\), as defined in the second case of \\( \\delta_{XY} \\). Essentially, this maintains outgoing edges from accepting states in \\( A_{X} \\) while adding edges from these accepting states to initial states of \\( A_{Y} \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.782, + 0.908, + 0.882 + ], + "angle": 0, + "content": "It follows from a similar argument that \\( L(A_{XY}) = L(A_X) \\circ L(A_Y) \\), where \\( L(A_X) \\circ L(A_Y) \\) is defined as \\( \\{s_X \\circ s_Y \\mid s_X \\in L(A_X), s_Y \\in L(A_Y)\\} \\). We first show \\( L(A_{XY}) \\subseteq L(A_X) \\circ L(A_Y) \\). Due to (P1), we can always split any \\( s \\in L(A_{XY}) \\) into \\( s_X \\) that extends from \\( I_X \\) to \\( F_X \\) and \\( s_Y \\) that extends from \\( I_Y \\) to \\( F_Y \\). Then \\( s_X \\in L(A_X) \\) and \\( s_Y \\in L(A_Y) \\). For \\( L(A_X) \\circ L(A_Y) \\subseteq L(A_X \\circ A_Y) \\), we pick any \\( s_X \\circ s_Y \\) from \\( L(A_X) \\circ L(A_Y) \\) and parse it using \\( A_{XY} \\). We observe that it will first traverse from \\( I_X \\) to \\( F_X \\) consuming \\( s_X \\), and then transition through \\( I_Y \\) to \\( F_Y \\) by consuming \\( s_Y \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.883, + 0.908, + 0.917 + ], + "angle": 0, + "content": "Moreover \\( A_{XY} \\) is a prefix automaton, if \\( A_{X} \\) and \\( A_{Y} \\) are prefix automata and \\( L(A_{Y}) \\neq \\emptyset \\). Since \\( A_{X} \\) is a prefix automaton, we can reach \\( F_{X} \\) from any state in \\( Q_{X} \\). From \\( F_{X} \\) we additionally reach" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.907, + 0.096 + ], + "angle": 0, + "content": "171:27" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.118, + 0.907, + 0.153 + ], + "angle": 0, + "content": "\\(I_{Y} \\subseteq Q_{Y}\\). Since \\(A_{Y}\\) is a prefix automaton, we can reach \\(F_{Y}\\) for any state in \\(Q_{Y}\\). 
This construction is a prefix automaton only if \\(I_{Y} \\neq \\emptyset\\), which, due to the prefix property, is equivalent to \\(L(A_{Y}) \\neq \\emptyset\\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.161, + 0.907, + 0.211 + ], + "angle": 0, + "content": "Kleene-Star. We define the Kleene-Star automaton \\( A_{\\overline{X}} \\) that parses indefinite repetitions of words accepted by \\( X \\). First, we consider all initial states as final states, i.e., we ensure \\( I_X \\subseteq F_{\\overline{X}} \\). Then we add transitions to the transition function \\( \\delta_X \\) from the final states \\( F_X \\) back to the initial states \\( I_X \\)." + }, + { + "type": "equation", + "bbox": [ + 0.294, + 0.217, + 0.7, + 0.262 + ], + "angle": 0, + "content": "\\[\n\\delta_ {\\overline {{X}}} (q _ {X}, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q _ {X}, c) & \\text {i f q \\not \\in F _ {X}} \\\\ \\delta_ {X} (q _ {X}, c) \\cup \\delta (I _ {X}, c) & \\text {i f q _ {X} \\in F _ {X}}. \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.267, + 0.907, + 0.351 + ], + "angle": 0, + "content": "We can quickly see that \\( L(A_{\\overline{X}}) = \\{\\overline{s} \\mid s \\in L(A_X)\\} \\), with the same argument as the concatenation automaton. Additionally, because the initial states are accepting, the empty word (zero repetitions) is in \\( L(A_{\\overline{X}}) \\). We similarly see that this is a prefix automaton if \\( A_{X} \\) is a prefix automaton. Note that here \\( L(A_{X}) \\neq \\emptyset \\) is not required. This is because if \\( L(A_{X}) \\neq \\emptyset \\), then \\( A_{\\overline{X}} = A_{X} = A_{\\emptyset} \\), which is still a prefix automaton." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.36, + 0.907, + 0.409 + ], + "angle": 0, + "content": "Terminals. The terminal automaton \\( A_{\\mathsf{S}} \\) parses exactly the terminal S. 
They accept the usual alphabet \\( \\Sigma \\) and feature the states \\( Q \\coloneqq \\{q_{\\mathsf{s}} \\mid \\mathsf{s} \\text{ is a suffix of S}\\} \\), \\( F \\coloneqq \\{q_{\\varepsilon}\\} \\), \\( I \\coloneqq \\{q_{\\mathsf{S}}\\} \\). The transition function \\( \\delta \\) is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.414, + 0.635, + 0.458 + ], + "angle": 0, + "content": "\\[\n\\delta (q _ {s}, c) := \\left\\{ \\begin{array}{l l} \\{q _ {s ^ {\\prime}} \\} & \\text {i f c \\circ s ^ {\\prime} = s} \\\\ \\varnothing & \\text {o t h e r w i s e .} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.464, + 0.907, + 0.548 + ], + "angle": 0, + "content": "Clearly \\( A_{\\mathfrak{S}} \\) is a prefix automaton. We can show inductively that for any \\( s: \\gamma(q_{s}, s') = \\{q_{\\varepsilon}\\} \\Longleftrightarrow s = s' \\), and thus \\( L(A_{\\mathfrak{S}}) = \\{\\mathfrak{S}\\} \\). With a simple modification, we introduce \\( A_{\\mathfrak{s}}^{W} \\), where \\( W \\) denotes whitespace characters. The transition function is defined as \\( \\delta(q_{\\mathfrak{s}}^{W}, c) := \\{q_{\\mathfrak{s}}^{W}\\} \\) if \\( c \\in W \\); otherwise, \\( \\delta(q_{c \\circ s}^{W}, t) := \\{q_{\\mathfrak{s}}^{W}\\} \\). This allows arbitrary whitespace before parsing \\( s \\). This is how we implement syntactic indifference to whitespace between terminals." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.561, + 0.257, + 0.578 + ], + "angle": 0, + "content": "A.2 Expressions" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.582, + 0.907, + 0.615 + ], + "angle": 0, + "content": "Expressions are parsed using recursive automatons as introduced in §3.4. In this part of the Appendix, we describe in more detail how information is passed between states." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.624, + 0.907, + 0.708 + ], + "angle": 0, + "content": "Notation. 
In the following, we will implicitly assume that \\(\\delta(q, c) = \\emptyset\\) if not explicitly defined otherwise, making notation more concise. For any state, we access the following information through dot notation or the special notation on the state, which we assume is passed to subsequent states through the transition function (unless otherwise stated). This information is alternatively passed through to entire automata in composite automata, e.g., in \\(A_{XY}\\) from \\(A_X\\) to \\(A_Y\\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.712, + 0.696, + 0.728 + ], + "angle": 0, + "content": "- \\( q \\in F_X \\): Whether state \\( q \\) is an accepting state of the automaton \\( A_X \\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.729, + 0.697, + 0.744 + ], + "angle": 0, + "content": "- \\( q. \\Gamma \\): The type environment based on state \\( q \\) currently being parsed." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.745, + 0.907, + 0.778 + ], + "angle": 0, + "content": "- \\( q \\).LHS: The left-hand side expression of an extending expression represented by state \\( q \\), i.e., when extending \\( X \\) with \\( Y \\) and currently parsing \\( q_{Y} \\), then \\( q_{Y} \\).LHS = \\( X \\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.779, + 0.907, + 0.828 + ], + "angle": 0, + "content": "- \\( q \\).TYP: The described type of the last coherent expression that this state belongs to. This is only defined for accepting states. Generally, we ensure that when some expression \\( e \\) was parsed, the corresponding state \\( q_{e} \\) has attribute \\( q_{e} \\).TYP such that \\( q_{e} \\Gamma \\vdash e : q_{e} \\).TYP." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.829, + 0.513, + 0.844 + ], + "angle": 0, + "content": "- \\( q \\downarrow T \\): Type \\( T \\) to which state \\( q \\) is constrained." 
+ }, + { + "type": "list", + "bbox": [ + 0.109, + 0.712, + 0.907, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.849, + 0.907, + 0.916 + ], + "angle": 0, + "content": "When accessing the properties of \\( A \\), we access the property of the current state of the automaton \\( q \\in Q \\), e.g., \\( A. \\mathrm{LHS} = q. \\mathrm{LHS} \\). For parsed automata, the current state is the final, accepting state. The TYP attribute expresses the type of the expression parsed so far. In expression states \\( q \\), we leverage the LHS to accurately determine \\( q. \\mathrm{TYP} \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:28" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.908, + 0.098 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.117, + 0.898, + 0.204 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} q _ {\\mathrm {S T R . T Y P}} := \\text {s t r i n g} \\quad q _ {(e) . \\mathrm {T Y P}} := A _ {e}. \\mathrm {T Y P} \\\\ q _ {\\text {N U M}}. \\text {T Y P} := \\text {n u m b e r} \\quad q _ {\\odot e}. \\text {T Y P} := R, \\text {f o r} q _ {\\odot e}. \\text {L H S}. \\text {T Y P} = S, A _ {e}. \\text {T Y P} = T \\text {a n d} S \\odot T: R \\\\ q _ {\\text {B O O L . T Y P}} := \\text {b o o l e a n} \\quad q _ {(\\overline {{e}}). \\text {T Y P}} := T, \\text {f o r} q _ {(\\overline {{e}}). \\text {L H S . T Y P}} = (\\overline {{p}}) \\Rightarrow T \\\\ q _ {x. \\mathrm {T Y P}} := T \\text {w h e r e} q _ {x}. \\Gamma \\vdash x: T \\quad q. n. \\mathrm {T Y P} := T, \\text {f o r L O O K U P} (q. n. \\mathrm {L H S . T Y P}, n) = T \\\\ q _ {(\\overline {{p}}) = > e. T Y P} := \\left(A _ {\\overline {{p}}} ^ {-}. T Y P\\right) = > A _ {e}. 
T Y P \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.211, + 0.91, + 0.276 + ], + "angle": 0, + "content": "Unrestricted Expressions. The left-hand side of the currently parsed expression is used in the definition of automata for three extending expressions; arithmetic operators, function call, and member access. The arithmetic operator automaton constrains its states to those with valid operators, i.e.:" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.278, + 0.66, + 0.315 + ], + "angle": 0, + "content": "\\[\nA_{\\odot e}:= \\bigcup_{\\exists R:A_{\\odot e}.LHS.TYP\\odot T = R}A_{\\odot}(\\circ A_{e}\\downarrow T).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.317, + 0.908, + 0.351 + ], + "angle": 0, + "content": "For function call, the automaton is only valid if the left-hand side is a function, and accepts only the valid signature." + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.358, + 0.762, + 0.401 + ], + "angle": 0, + "content": "\\[\nA _ {(\\overline {{e}})} := \\left\\{ \\begin{array}{l l} A _ {(} \\circ (A _ {\\overline {{e}}} \\downarrow A _ {\\overline {{p}}}. \\mathrm {T Y P}) \\circ A _ {)} & \\text {i f} A _ {(\\overline {{e}}). \\mathrm {L H S . T Y P}} = (\\overline {{p}}) \\Rightarrow T \\\\ A _ {\\emptyset} & \\text {o t h e r w i s e .} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.407, + 0.908, + 0.441 + ], + "angle": 0, + "content": "Finally, the member access automaton is a union of the automata that parses the attributes of the left-hand side expression. Or formally," + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.446, + 0.649, + 0.485 + ], + "angle": 0, + "content": "\\[\nA_{\\cdot n}:= \\bigcup_{\\exists T:\\text{LOOKUP}(A_{\\cdot n}.LHS.TYP},m) = T}A_{\\cdot \\mathfrak{m}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.492, + 0.908, + 0.526 + ], + "angle": 0, + "content": "Type-Restricted Expressions. 
The type-restricted versions of the automata are covered by the definitions presented in §3.4. We therefore do not separately list them here." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.539, + 0.377, + 0.555 + ], + "angle": 0, + "content": "A.3 Pruning the Type Search" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.559, + 0.91, + 0.608 + ], + "angle": 0, + "content": "We now present our heuristic for pruning the type search recursion from the prefix automaton for type-constrained expressions in §3.4, i.e., our implementation of PRUNESEARCH at Line 6 of Algorithm 2. The heuristic is based on the complexity and novelty of candidate types to explore." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.609, + 0.911, + 0.759 + ], + "angle": 0, + "content": "Based on the assumptions about the lookup function and the extension expressions in §3.1, we observe a restriction in the reachable types by extensions: from any given type, we reach itself, result types of arithmetic operators via op, return types through CALL, and member types through MEMBER. A higher-order type \\((\\cdot) \\Rightarrow T\\) does not allow access to types not reachable from \\(T\\). Consequently, we avoid exploring such higher-order types unless the target type is of higher order, or the higher-order type offers novel, yet unexplored types. For instance, in Figure 11, the type \\((\\cdot) \\Rightarrow\\) number is not explored, because it is more complex than both the initial and goal types, number and string, and does not contain any unexplored type. Meanwhile, \\((\\cdot) \\Rightarrow\\) string is explored, as it contains a novel string type." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.759, + 0.91, + 0.825 + ], + "angle": 0, + "content": "To formalize this understanding, we introduce the concepts about the depth and root types of a given type, denoted as \\( \\text{DEPTH} \\) and \\( \\text{ROOT} \\), respectively. 
\\( \\text{DEPTH} \\) measures the complexity of a type, specifically the order of a function, while \\( \\text{ROOT} \\) returns all types of minimal depth (e.g., string, number, and boolean) that constitute a higher-order type. They are defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.831, + 0.892, + 0.876 + ], + "angle": 0, + "content": "\\[\n\\operatorname {D E P T H} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {D E P T H} (S) + 1 & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right. \\qquad \\operatorname {R O O T} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {R O O T} (S) & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ \\{T \\} & \\text {o t h e r w i s e .} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.882, + 0.908, + 0.916 + ], + "angle": 0, + "content": "We leverage DEPTH and ROOT to implement PRUNESEARCH \\((T,G,S)\\) for a current type \\(T\\), a goal type \\(G\\), and a type \\(S\\) after an extension is applied on \\(T\\). In general, if \\(G\\) is not directly accessible" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:29" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.114, + 0.745, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.247, + 0.912, + 0.314 + ], + "angle": 0, + "content": "Figure 11. An example search through the graph for type reachability, starting from \\( T = \\) number with the goal string, e.g., after parsing let x : string; \\( x = 1 \\). States and edges along the final path are marked in green and explored nodes in blue. 
The () => number node is not explored, as complex types are avoided by our heuristic. The node () => string is explored as it enables reaching new type string." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.358, + 0.909, + 0.425 + ], + "angle": 0, + "content": "from \\( T \\), it will also not be accessible from expressions with the same root types but greater depth, such as \\( () \\Rightarrow T \\). When \\( G \\) is of higher order, exploring up to the depth of \\( G \\) can be required, such as when \\( G = () \\Rightarrow (() \\Rightarrow \\text{number}) \\). Based on these two ideas, we stop exploring \\( S \\) when \\( \\text{DEPTH}(S) > \\max(\\text{DEPTH}(G), \\text{DEPTH}(T)) \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.425, + 0.91, + 0.525 + ], + "angle": 0, + "content": "Further, if a higher-depth function returns an unexplored type, we need to explore it. Sticking to the example in Figure 11, type number has the member toString of type () => string. The type string can only be reached by exploring the member access at depth 1. On the contrary, we do not explore a higher-depth function if it does not introduce novel types other than those explored. To achieve this, we adapt Algorithm 2 to additionally define a set of root types \\( R \\), which is initialized to an empty set and is updated by \\( R := R \\cup \\mathrm{root}(T) \\). We do not explore \\( S \\) if \\( \\mathrm{root}(S) \\subseteq R \\)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.525, + 0.911, + 0.643 + ], + "angle": 0, + "content": "Taking the conjunction of the aforementioned two aspects, our pruning heuristic is implemented as PRUNESEARCH\\((T,G,S) \\coloneqq \\mathrm{DEPTH}(S) > \\max(\\mathrm{DEPTH}(T), \\mathrm{DEPTH}(G)) \\wedge \\mathrm{ROOT}(S) \\subseteq R\\). The restrictions based on depth and root types are based on the results of the rigorously analyzed search over succinct types by Gvero et al. [30]. 
This provides a robust heuristic for exploring as many relevant inhabitable types as possible. However, due to the additional complexity introduced by the lookup function, we can not guarantee completeness and instead refer to the strong empirical results in our evaluation in §5 as evidence of the search's high coverage." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.659, + 0.421, + 0.675 + ], + "angle": 0, + "content": "A.4 Implementation of DERIVABLE" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.68, + 0.909, + 0.78 + ], + "angle": 0, + "content": "Recall that in Table 1, DERIVABLE for function expressions are defined as: \\(\\mathrm{DERIVABLE}(q_{(\\overline{p})\\Rightarrow e})\\coloneqq \\{(\\overline{p})\\Rightarrow T\\mid \\mathrm{REACHABLE}(\\mathrm{DERIVABLE}(q_e),T)\\}\\). This involves constructing a type reachability graph and collecting all types \\(T\\) reachable from DERIVABLE \\((q_{e})\\). However, this process is intractable because \\(T\\) can be of arbitrarily high-order, as such there are infinitely many \\(T\\) to explore. A similar issue exists for grouped expressions, as their DERIVABLE function is also defined to enumerate reachable types. We introduce two optimization heuristics to address this problem." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.78, + 0.911, + 0.915 + ], + "angle": 0, + "content": "We first observe that DERIVABLE is always called within the context of an invocation of REACHABLE with target type \\( G \\), e.g., REACHABLE(DERIVABLE(q(\\overline{p}) => e), G) for function expressions. To compute DERIVABLE(q(\\overline{p}) => e), we enumerate all types present on the type graph represented by REACHABLE(DERIVABLE(q_e), G), which is finite due to application of the pruning heuristics in Appendix A.3. In other words, we bound the maximum complexity of considered types \\( T \\) using the pruning heuristic for reachability of target type \\( G \\). 
This leads to a sound but potentially incomplete version of DERIVABLE. However, since the final goal is to reach \\( G \\), this heuristic provides a practically useful set of all relevant derivable types." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.092, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:30" + }, + { + "type": "header", + "bbox": [ + 0.34, + 0.083, + 0.908, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.119, + 0.907, + 0.203 + ], + "angle": 0, + "content": "Second, we observe that the resulting two-tiered call REACHABLE( DERIVABLE \\((q_{(\\overline{p})} \\Rightarrow e)\\), \\(G\\)) can be integrated into a single call to further reduce the amount of explored types. Concretely, when discovering some type \\(M\\) in REACHABLE( DERIVABLE \\((q_e)\\), \\(G\\)), as per the previous heuristic, we allow transitioning directly to REACHABLE \\((\\overline{p}) \\Rightarrow M, G\\) to allow a depth-prioritizing exploration of the search graph. This allows us to efficiently discover a path to \\(G\\) if it exists." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.216, + 0.25, + 0.231 + ], + "angle": 0, + "content": "A.5 Statements" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.237, + 0.907, + 0.27 + ], + "angle": 0, + "content": "We define the remaining automata to capture the complete language from §3.1. To correctly handle function return types, we pass on related information when entering function bodies:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.276, + 0.57, + 0.291 + ], + "angle": 0, + "content": "- \\( q.R \\): The expected return type of the current state \\( q \\)." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.292, + 0.868, + 0.308 + ], + "angle": 0, + "content": "- \\( q. \\) RETURNED: Whether the currently parsed program block has returned in all branches." 
+ }, + { + "type": "text", + "bbox": [ + 0.109, + 0.308, + 0.907, + 0.357 + ], + "angle": 0, + "content": "- q.MUSTRETURN: Whether the currently parsed program block must return (i.e., If-Then-Else branches do not need to contain return statements even if a return type is expected of the surrounding code block)." + }, + { + "type": "list", + "bbox": [ + 0.109, + 0.276, + 0.907, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.363, + 0.909, + 0.546 + ], + "angle": 0, + "content": "The single statement automaton is another recursive definition, since some statements, e.g., If-Then-Else, can themselves contain statements. The statement automaton is defined recursively as \\( A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}} \\). The expression statement automaton and block automaton are simply defined as \\( A_{\\mathrm{EXPR}} \\coloneqq A_{e} \\); and \\( A_{\\mathrm{BLOCK}} \\coloneqq A_{\\{\\overline{s}\\}} \\). The declaration automaton \\( A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x:T} \\); captures variable names \\( x \\) using an automaton for non-existing identifiers, which works the same way as \\( A_{x} \\) except that it rejects terminals that match an existing variable. This automaton is a prefix automaton as well, since indefinite additional characters can be added to the variable name and there are only finitely many defined variables. The If-Then-Else automaton is defined using standard concatenation: \\( A_{\\mathrm{ITE}} \\coloneqq A_{\\mathrm{if}(e) s \\text{else}s} \\). The statements automaton \\( A_{\\overline{s}} \\), based on the Kleene-Star automaton definition and the single statement automaton. Return statements are only non-empty when the expected return type is set, i.e. 
when parsing inside a function:" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.553, + 0.677, + 0.598 + ], + "angle": 0, + "content": "\\[\nA _ {\\mathrm {R E T}} := \\left\\{ \\begin{array}{l l} A _ {\\mathrm {r e t u r n}} \\circ A _ {e} \\downarrow T & \\text {i f} A _ {\\mathrm {R E T}}. R = T \\\\ A _ {\\emptyset} & \\text {o t h e r w i s e .} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.606, + 0.91, + 0.638 + ], + "angle": 0, + "content": "For functions, the automaton is based on the standard concatenation \\( A_{\\text{FUN}} \\coloneqq A_{\\text{function } x(\\overline{p}):T(\\overline{s})} \\). However, the transition function updates the states of the statement automata inside the function:" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.643, + 0.907, + 0.675 + ], + "angle": 0, + "content": "- \\( q.R \\coloneqq T \\), i.e., the return type of these statements is set to the return type of the function. This value is propagated recursively to all sub-automata." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.676, + 0.907, + 0.725 + ], + "angle": 0, + "content": "- \\( q \\).MUSTRETURN := true, for the outermost statement block automaton. It is set to false for deeper nested statement blocks and as soon as a parsed statement \\( X \\) has \\( q_{X} \\).RETURNED set to true - i.e. one of the main body statements returned in every branch." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.726, + 0.906, + 0.775 + ], + "angle": 0, + "content": "- \\( q. \\text{RETURNED} := \\) false, per default in every statement, except a) in return automata, b) inside a multi-statement automaton where the previous statement has RETURNED = true and c) in ITE-automata where both branching statements have RETURNED = true." 
+ }, + { + "type": "list", + "bbox": [ + 0.109, + 0.643, + 0.907, + 0.775 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.781, + 0.922, + 0.848 + ], + "angle": 0, + "content": "As long as a state \\( q \\) in a multi-statement automaton has \\( X. \\text{RETURNED} = \\text{false} \\) and \\( q. \\text{MUSTRETURN} = \\text{true} \\), it can not accept but instead forces the generation of another statement. Since we can always express the requested type through literals and can always generate a return statement to fulfill this requirement, the prefix automaton property is not violated." + }, + { + "type": "title", + "bbox": [ + 0.089, + 0.861, + 0.492, + 0.877 + ], + "angle": 0, + "content": "B Details about Experimental Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.882, + 0.907, + 0.916 + ], + "angle": 0, + "content": "In this section, we detail how executable code is extracted from the model responses and a slight modification to the decoding algorithm used, that increases throughput heuristically." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.083, + 0.496, + 0.098 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.906, + 0.096 + ], + "angle": 0, + "content": "171:31" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.118, + 0.912, + 0.203 + ], + "angle": 0, + "content": "Implementation Details. We have two main external dependencies. To implement the regular-expression-based literal automata, we leverage the regex library, as it allows checking if the current string can be completed to match a regular expression. To implement LLM inference, we leverage the transformers library. We provide an exhaustive list of supported and unsupported features of the TypeScript language in our final implementation in Tables 5 and 6, respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.21, + 0.911, + 0.295 + ], + "angle": 0, + "content": "Hyperparameters. We run the models on A100 NVidia GPUs with 80 GB of VRAM and CUDA version 12.4. We set the sampling temperature to 1. We set seeds to 0 to 4 on the four HumanEval runs and 0 on the one MBPP run, respectively. We limit the completions to 1000 tokens and time out after 300 seconds. We compute syntactic correctness using the Oxidation toolchain [52] as the official TypeScript compiler does not clearly distinguish between syntactic and semantic errors." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.301, + 0.911, + 0.402 + ], + "angle": 0, + "content": "Excluded MBPP Instances. We discovered that a number of TypeScript translations in the MultiPL-E dataset [13] contained invalidly generated nested tuples. After reporting them to the developers, they have been resolved in the latest version of MBPP and we include them in our evaluation. Still, we find that the TypeScript translation of a number of MBPP instances contains too broad type annotation, annotating elements as any or array of any. We therefore exclude the following 6 instances from the evaluation:" + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.406, + 0.646, + 0.422 + ], + "angle": 0, + "content": "- mbpp_405_check_tuplex\n- mbpp_612_merge" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.423, + 0.765, + 0.438 + ], + "angle": 0, + "content": "- mbpp_563extract_values -mbpp_725.extract_quotation" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.44, + 0.725, + 0.454 + ], + "angle": 0, + "content": "- mbpp_580.extract_even\n- mbpp_791_removeNSTed" + }, + { + "type": "list", + "bbox": [ + 0.151, + 0.406, + 0.765, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.46, + 0.912, + 0.611 + ], + "angle": 0, + "content": "Complete Prompts. 
We provide the complete LLM prompts for our evaluated tasks (synthesis, translation, and repair) in Figures 12-14. The prompts are templates, instantiated with instructions specific to each task and problem instance. If system prompts are not available for a given LLM, we prepend the system prompt to the first user prompt. The model completion starts from a pre-filled function signature, enabling unified unit testing. For the repair prompt, we add the non-compilable model output as assistant output and use a second turn to pass back compiler outputs. Compiler errors contain line numbers for localization, so we annotate the output with line numbers. We find that Qwen2.5 32B tends to always generate test cases, which leads to errors during compilation. We therefore append the sentence Do not include test cases in the code. to its prompt." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.619, + 0.912, + 0.753 + ], + "angle": 0, + "content": "Extracting Output Code. Given our prompts, LLMs are expected to output the resulting programs. However, they often produce additional outputs, such as generated test cases and explanations. Now we describe our heuristics for extracting the generated code. We first extract the corresponding TypeScript code block (i.e., ```typescript ... ```), or do not cut off if the block is not closed. Inside the code block, we cut off after the closing curly brace of the last balanced pair of curly braces, if it is followed by a newline or semicolon. This determines the last statement block generated, and avoids cutting off, e.g., inside a template literal. Again, if no such case is found, we do not prune the output. We demonstrate the operation of our cutoff heuristics in Figure 15."
+ }, + { + "type": "title", + "bbox": [ + 0.089, + 0.763, + 0.357, + 0.78 + ], + "angle": 0, + "content": "C Case Study Full Outputs" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.785, + 0.911, + 0.835 + ], + "angle": 0, + "content": "In §5.4, we present the shortened versions of three qualitative examples showcasing the effectiveness of our approach. In Figures 16-18, we provide the full code outputs of these examples, with detailed descriptions in the respective captions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.089, + 0.887, + 0.305, + 0.902 + ], + "angle": 0, + "content": "3https://pypi.org/project/regex/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.089, + 0.902, + 0.38, + 0.915 + ], + "angle": 0, + "content": "4 https://huggingface.co/docs/transformers" + }, + { + "type": "list", + "bbox": [ + 0.089, + 0.887, + 0.38, + 0.915 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.084, + 0.138, + 0.096 + ], + "angle": 0, + "content": "171:32" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.083, + 0.908, + 0.098 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "table_caption", + "bbox": [ + 0.345, + 0.117, + 0.651, + 0.133 + ], + "angle": 0, + "content": "Table 5. Supported TypeScript features." + }, + { + "type": "table", + "bbox": [ + 0.092, + 0.136, + 0.905, + 0.641 + ], + "angle": 0, + "content": "
Supported TypeScript FeaturesExamples
Expressions, Statements, Function Declarations(LB as introduced in §3)
Additional Literals: BigInt, Regex, Template Strings10n, /\\d*, 'hello ${user}'
Additional Types: void, null, undefinedvoid, undefined, null
Index Signature Types and Literalslet x: {{y: number}: string} = 1: "hi";
Anonymous Functionsfunction(): bool {return true}
Lambda Functions with and without Function Bodiesx => {return y}, x => y
Ternary and LogicOperators? :, |, &&
Arithmetic and Boolean Operations+, -, **, &, !
Assigning Pre-and Postfix Operators++, --
Arrays[1, 2, 3]
Access and Assignment to Computed Membersx[10] = y[i];
Constructors and "new" Callslet x = new Number(1);
Calls with Optional and Rest Parametersfunction foo(x?: number, y...: string)
Sets and MapsMap<string, number>}()
Parameterized Constructor Callsnew Set<string>}()
Tupleslet x: [int, string] = [1, "hello"];
Optional Chainingx.get("hi").get("world")
Spread Operator[...xs]
Type Assertions"hello" as any
For Loopsfor(int x = 0; i < 10; i++)
For Of Loopsfor(x of xs)
For Of Loops with Tuple Destructuringfor([x, y] of xys)
Do-While and While Loopswhile (true) {...}
Typed and Untyped Variable Declarationslet x: number = 1; let y = 100;
Comments, Multiline Comments// Comment
Returning without Expressionsreturn;
Try-Catch Statements with a Fixed Exception Typetry {...} catch (e) {...}
Throw Statementsthrow new Error("..."
Importing the crypto Libraryrequire("crypto")
Global Scope ObjectsMath, parseInt
Automatic Semicolon Insertion
" + }, + { + "type": "table_caption", + "bbox": [ + 0.334, + 0.67, + 0.663, + 0.686 + ], + "angle": 0, + "content": "Table 6.Unsupported TypeScript features." + }, + { + "type": "table", + "bbox": [ + 0.093, + 0.689, + 0.905, + 0.905 + ], + "angle": 0, + "content": "
Missing FeaturesExamples
General Library Importsrequire("example")
Use of Functions Before Declaration
For In Loopsfor(x in y)
Type Declaration
User-Defined Classes
Declaration and Parameterized Call of General Parameterized Functions
Destructuring Assignment[x, y] = z
Uninitialized, Unannotated Variable Declarationslet x;
Return Type Inference
Literal Types
Enumerables
Symbols
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.084, + 0.495, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.907, + 0.095 + ], + "angle": 0, + "content": "171:33" + }, + { + "type": "code", + "bbox": [ + 0.107, + 0.12, + 0.892, + 0.363 + ], + "angle": 0, + "content": "System: \nYou are an expert in TypeScript programming. Solve the given problem by writing solution code in TypeScript. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. \nUser: \nCheck if in given array of numbers, are any two numbers closer to each other than given threshold. \n>>> has_close_elements([1.0, 2.0, 3.0], 0.5) \nfalse \n>>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) \ntrue function \nAssistant: \n``~typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.371, + 0.908, + 0.424 + ], + "angle": 0, + "content": "Figure 12. The full prompt for the synthesis task. Text in green is based on the problem instance, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases." + }, + { + "type": "code", + "bbox": [ + 0.107, + 0.438, + 0.866, + 0.835 + ], + "angle": 0, + "content": "System: \nYou are a helpful and expert programmer in Python and TypeScript. You will be given an input program in Python and your task is to translate this program into TypeScript. You may assume that the input program is correct and that the translation should be semantically equivalent. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. 
\nUser: \nThe following is the source program in Python: \n``python \nfrom typing import List \ndef has_close_elements(numbers: List[float], threshold: float) -> bool: for idx, elem in enumerate(numbers): for idx2, elem2 in enumerate(numbers): if idx != idx2: distance = abs(elem - elem2) if distance < threshold: return True return False \nPlease translate the source program to TypeScript. \nAssistant: \n``'typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {" + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.845, + 0.908, + 0.899 + ], + "angle": 0, + "content": "Figure 13. The full prompt for the translation task. Text in green is based on the problem instance, blue is the original Python canonical solution, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:34" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.907, + 0.098 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.149, + 0.166, + 0.162 + ], + "angle": 0, + "content": "System:" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.164, + 0.865, + 0.19 + ], + "angle": 0, + "content": "You are an expert in JavaScript programming. Solve the given problem by writing solution code in JavaScript. When answering, insert the solution code in a `````````````````````````````````````````````````````````````````````````````````````````````````````" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.206, + 0.15, + 0.216 + ], + "angle": 0, + "content": "User:" + }, + { + "type": "code", + "bbox": [ + 0.128, + 0.219, + 0.667, + 0.351 + ], + "angle": 0, + "content": "Given an array of positive integers x. 
return a sorted array of all elements that hasn't any even digit. \nNote: Returned array should be sorted in increasing order. \nFor example: \n>>unique_digits([15,33,1422,1]) \n[1, 15, 33] \n>>unique_digits([152, 323, 1422, 10]) \n[]" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.366, + 0.19, + 0.377 + ], + "angle": 0, + "content": "Assistant:" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.393, + 0.677, + 0.632 + ], + "angle": 0, + "content": "001: function unique_digits(x: number[]) : number[] { \n002: const filteredArray: number[] = []; // initialize empty array \n003: for (let i = 0; i < x.length; i++) { \n004: let strx = \"\" + x[i]; \n005: let filteredStr = strx \n006: .ToString() \n007: .split(\"\"); \n008: .filter((digit) => parseInt(digit) % 2 != 0); \n009: if (filteredStr.length > 0) { \n010: filteredArray.push(...filteredStr); \n011: } \n012: } \n013: return filteredArray.sort((a, b) => a - b); \n014: }" + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.653, + 0.151, + 0.664 + ], + "angle": 0, + "content": "User:" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.666, + 0.832, + 0.712 + ], + "angle": 0, + "content": "This output produced an error: \n../../../.././tmp/tmpaewlckfv.ts(10,26): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'." + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.727, + 0.648, + 0.74 + ], + "angle": 0, + "content": "Write the program again, and make sure to fix the error this time." + }, + { + "type": "text", + "bbox": [ + 0.109, + 0.755, + 0.19, + 0.767 + ], + "angle": 0, + "content": "Assistant:" + }, + { + "type": "code", + "bbox": [ + 0.126, + 0.769, + 0.507, + 0.797 + ], + "angle": 0, + "content": "```\n```\n```script\nfunction unique_digits(x: number[]) : number[] {" + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.81, + 0.907, + 0.882 + ], + "angle": 0, + "content": "Figure 14. 
The full prompt for the repair task, where we simulate a multi-turn interaction. Text in green is based on the problem instance. blue is non-compilable model output from the synthesis step, in this case HumanEval #104 and Gemma 2 2B. red is compiler feedback. Generations from the synthesis step are annotated with line numbers to aid localizing the compiler error." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.084, + 0.495, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.909, + 0.097 + ], + "angle": 0, + "content": "171:35" + }, + { + "type": "code", + "bbox": [ + 0.095, + 0.291, + 0.769, + 0.613 + ], + "angle": 0, + "content": "```cpp\n```\n```\nfunction next_smallest PALINDrome(num: number): number {\n const numStr = num.toString();\n const reversedNumStr = numStr.splitAGING).reverse().joinAGING);\n let min = Math.min(Number(numStr), Number(reversedNumStr));\n}\nif (min === Number(numStr)) {\n return Number(numStr);\n}\nelse {\n return parseInt(min.toString() + 1);\n}\n}\n}\nconst findNextSmallestPalindrome = next_smallest PALINDrome(12321);\nconsole.logfindNextSmallestPalindrome); // Output: 12322\n}\n**Explanation**\nThe function takes a number as input (^num>) in the given criteria." + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.619, + 0.912, + 0.673 + ], + "angle": 0, + "content": "Figure 15. The code generated by Gemma 2 2B for MBPP #0. The cutoff algorithm first extracts the fenced code, last line marked in blue. To avoid the usage of the console object from the browser extension of TypeScript in the generated examples, we further cut off after the last code block, marked in red." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:36" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.907, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "code", + "bbox": [ + 0.095, + 0.153, + 0.554, + 0.399 + ], + "angle": 0, + "content": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const char = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includes(char)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.404, + 0.909, + 0.451 + ], + "angle": 0, + "content": "(a) The code generated by the unconstrained Gemma 2 2B when translating a Python solution. The model tries to call the split member of a string object without parameters. This is legal in Python but not allowed in TypeScript." + }, + { + "type": "code", + "bbox": [ + 0.095, + 0.474, + 0.563, + 0.76 + ], + "angle": 0, + "content": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const letter = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includesletter)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.767, + 0.908, + 0.797 + ], + "angle": 0, + "content": "(b) The code generated by Gemma 2 2B with type constraints. 
The constraints guide the model and help it generate the required argument to split, which determines the characters around which to split the string." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.819, + 0.908, + 0.866 + ], + "angle": 0, + "content": "Figure 16. Comparison between the function generated by unconstrained Gemma 2 2B [64] and the code generated with the guidance of type constraints on the problem select_words in HumanEval [14]. This problem requires a function that selects words from a given list that contain a specified number of consonants." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.09, + 0.084, + 0.495, + 0.097 + ], + "angle": 0, + "content": "Type-Constrained Code Generation with Language Models" + }, + { + "type": "page_number", + "bbox": [ + 0.863, + 0.084, + 0.906, + 0.095 + ], + "angle": 0, + "content": "171:37" + }, + { + "type": "code", + "bbox": [ + 0.097, + 0.288, + 0.479, + 0.396 + ], + "angle": 0, + "content": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\n}" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.401, + 0.909, + 0.462 + ], + "angle": 0, + "content": "(a) The code generated by the unconstrained Deepseek Coder 33B Instruct. The function loops over all numbers smaller than \\( n \\) to determine the largest divisor. Even though functionally correct for positive numbers, this function returns undefined when the return statement in the loop is never called (e.g., for negative inputs), violating the type system rules." 
+ }, + { + "type": "code", + "bbox": [ + 0.097, + 0.485, + 0.513, + 0.61 + ], + "angle": 0, + "content": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\nreturn 1; // Return 1 if no such divisor found\n}" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.615, + 0.907, + 0.66 + ], + "angle": 0, + "content": "(b) The code generated by DeepseekCoder 33B Instruct with type constraints. The system ensures that every path returns a value matching the annotated type. This forces the LLM to generate another line after the loop, and the model correctly chooses to generate a fallback return statement." + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.684, + 0.909, + 0.729 + ], + "angle": 0, + "content": "Figure 17. Comparison between the function generated by the unconstrained DeepSeekCoder 33B Instruct [28] and the code generated with the guidance of type constraints on the problem largest_divisor in HumanEval [14]. The problem asks to compute the largest divisor of a given number." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.093, + 0.085, + 0.136, + 0.095 + ], + "angle": 0, + "content": "171:38" + }, + { + "type": "header", + "bbox": [ + 0.341, + 0.084, + 0.907, + 0.097 + ], + "angle": 0, + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + }, + { + "type": "code", + "bbox": [ + 0.096, + 0.221, + 0.674, + 0.357 + ], + "angle": 0, + "content": "function intersperse(numbers: number[], delimiter: number): number[] { \nreturn numbers.reduce((acc, curr, index) => { \nacc.push(curr); \nif (index < numbers.length - 1) { \nacc.push(delimeter); \n} \nreturn acc; \n}, []); \n}" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.362, + 0.909, + 0.515 + ], + "angle": 0, + "content": "(a) The code generated by the unconstrained Gemma 2 9B. The code uses the reduce member of arrays in TypeScript. 
The reduce member is a generic function that accepts a callback as first argument. This callback has three parameters: an accumulator, a current value and an index. The types of the second and third parameter are fixed to the element type of the callee array and number. The first argument type of the callback function is automatically inferred from the type of the second argument to the reduce function, in this case an empty array. The type of this array is never [], a special type denoting arrays without members. Since we can assign this type to the callee type number[], the accumulator parameter obtains type never[]. However, calling push on this type results in a type violation as the pushed value of type number is not compatible with the inferred element type never. This issue is well known in TypeScript and even human developers frequently stumble upon this issue [47, 48]." + }, + { + "type": "code", + "bbox": [ + 0.095, + 0.537, + 0.703, + 0.674 + ], + "angle": 0, + "content": "function intersperse(numbers: number[], delimiter: number): number[] {\nreturn numbers.reduce((acc: number], num: number, index: number) => {\nif (index > 0) {\nacc.push(delimeter);\n}\nacc.push(num);\nreturn acc;\n}, []);\n}" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.681, + 0.908, + 0.725 + ], + "angle": 0, + "content": "(b) The code generated by Gemma 2 9B [64] with type constraints. We force the model to generate type annotations for the callback function in reduce and thus enable the model to generate the correct type annotation and avoid the issue in the unconstrained code." + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.749, + 0.908, + 0.794 + ], + "angle": 0, + "content": "Figure 18. Comparison between the function generated by the unconstrained Gemma 2 9B [73] and the code generated with the guidance of type constraints on the problem intersperse in HumanEval [14]. 
The task in this problem is to insert a delimiter number between consecutive elements of an input list." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_origin.pdf b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..542e61d2d6f5746499bff607ee1725d2aa3ec257 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/056a1ec9-5612-40a0-bb59-6e9ca9d014d6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b3a9f42b5c6ee190b744c2fc8e51cc0b0e5bc255ebf774af5299e9a04b9620 +size 994414 diff --git a/data/2025/2504_09xxx/2504.09246/full.md b/data/2025/2504_09xxx/2504.09246/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7da6b110593ff45db0cd82b381d75134d839b666 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/full.md @@ -0,0 +1,936 @@ +# Type-Constrained Code Generation with Language Models + +NIELS MündLER*, ETH Zurich, Switzerland + +JINGXUAN HE*, UC Berkeley, USA + +HAO WANG,UC Berkeley,USA + +KOUSHIK SEN, UC Berkeley, USA + +DAWN SONG, UC Berkeley, USA + +MARTIN VECHEV, ETH Zurich, Switzerland + +Large language models (LLMs) have achieved notable success in code generation. However, they still frequently produce uncompilable output because their next-token inference procedure does not model formal aspects of code. Although constrained decoding is a promising approach to alleviate this issue, it has only been applied to handle either domain-specific languages or syntactic features of general-purpose programming languages. However, LLMs frequently generate code with typing errors, which are beyond the domain of syntax and generally hard to adequately constrain. To address this challenge, we introduce a type-constrained decoding approach that leverages type systems to guide code generation. 
For this purpose, we develop novel prefix automata and a search over inhabitable types, forming a sound approach to enforce well-typedness on LLM-generated code. We formalize our approach on a foundational simply-typed language and extend it to TypeScript to demonstrate practicality. Our evaluation on the HumanEval and MBPP datasets shows that our approach reduces compilation errors by more than half and significantly increases functional correctness in code synthesis, translation, and repair tasks across LLMs of various sizes and model families, including state-of-the-art open-weight models with more than 30B parameters. The results demonstrate the generality and effectiveness of our approach in constraining LLM code generation with formal rules of type systems. + +CCS Concepts: • Theory of computation → Formal languages and automata theory; • Software and its engineering → General programming languages; • Computing methodologies → Machine learning. + +Additional Key Words and Phrases: Code Generation, Language Model, Type System, Program Synthesis, Program Translation, Program Repair, Constrained Decoding + +# 1 Introduction + +Large language models (LLMs) are remarkably successful in diverse fields [12, 27, 49] and increasingly used in everyday coding tasks [25, 68]. They show promising capabilities at synthesizing code from natural language descriptions [37, 59], translating between programming languages [59], and repairing incorrect programs [44, 74]. Despite these achievements, LLM-generated code often contains compilation errors, logic flaws, or security vulnerabilities [20, 53, 55]. These issues arise because LLMs generate code by iteratively sampling the next token from a vocabulary of tokens – a probabilistic process that does not provide any formal guarantees. 
+ +A promising technique to address this limitation is constrained decoding, which enforces the formal rules of programming languages during LLMs' code generation process, rejecting invalid tokens and ensuring only valid tokens are considered as generation candidates. Previous studies have shown that constrained decoding improves adherence to program syntax [8, 41, 57, 66]. + +*Both authors co-lead this project. + +Authors' Contact Information: Niels Mündler, niels.muendler@inf.ethz.ch, ETH Zurich, Switzerland; Jingxuan He, jingxuan. he@berkeley.edu, UC Berkeley, USA; Hao Wang, hwang628@berkeley.edu, UC Berkeley, USA; Koushik Sen, ksen@berkeley. edu, UC Berkeley, USA; Dawn Song, dawnsong@berkeley.edu, UC Berkeley, USA; Martin Vechev, martin.vechev@inf.ethz.ch, ETH Zurich, Switzerland. + +![](images/8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg) + +However, these improvements are limited, as syntax accounts for only a small part of overall program correctness. For instance, in our evaluation of state-of-the-art open-weight LLMs (§5), syntactic errors make up on average $6\%$ of all compilation errors in generated TypeScript code. + +Key Challenge: Generating Well-Typed Code. Beyond program syntax, type systems detect and reject bugs at compile time [40, 43] and are therefore enforced in many popular programming languages [4, 10, 19]. We observe that LLMs struggle to generate well-typed code [20, 29, 63], as typing rules significantly complicate the generation of valid code [62]. In our evaluation of LLMs (§5), on average $94\%$ of compilation errors result from failing type checks. This suggests a promising direction: guiding LLMs' code generation process by incorporating the formal rules of type systems. + +However, implementing this approach is challenging because type systems can in general not be captured by context-free grammars [43], prohibiting the application of prior constrained decoding methods developed for program syntax [8, 66]. 
Furthermore, besides deriving and maintaining a type environment for completed expressions during generation (similar to classic type systems), we need to accurately assess and handle partial expressions. Specifically, for each currently generated partial expression, we must decide whether the partial expression can be completed to match a required type. Determining this would allow us to constrain the LLM to provably generate well-typed expressions upon termination, but involves solving the challenging problem of type inhabitation [30, 67] in the novel context of LLM-based code generation. + +This Work: Type-Constrained Decoding. In this work, we introduce type-constrained decoding1, addressing the challenge of generating well-typed code using LLMs. We develop a sound algorithm to determine if a partial program can be completed into a well-typed program. This algorithm is based on a novel non-deterministic automaton we construct. The automaton incrementally builds abstract syntax trees described by the partial program and annotates them with type-relevant context, e.g., declared identifiers and expression types. It leverages such information to maintain a prefix property, ensuring that parsing a program prefix only results in a non-empty set of states when it can be completed into a well-typed program. To guarantee the prefix property, we design a sound type search algorithm that determines whether a partial expression can inhabit a given type. We construct our automaton for a generic, simply-typed Turing-complete calculus [10]. + +To demonstrate its practical effectiveness, we instantiate our approach on a non-trivial subset of TypeScript. 
We choose TypeScript for three key reasons: (i) it is currently one of the most actively used languages, e.g., in open-source projects on GitHub [26, 38]; (ii) as we show, state-of-the-art LLMs fail to reliably generate well-typed TypeScript code; (iii) its core type system is simple enough [10] to be suitable for developing the first prototype of our approach. We perform a comprehensive evaluation on TypeScript versions of the widely-used HumanEval and MBPP benchmarks [5, 13, 14], focusing on three common coding tasks: synthesis, translation, and repair. Our experimental results show that type-constrained decoding significantly enhances code generation for LLMs of various sizes (2B-34B parameters). For synthesis and translation, it reduces compilation errors by more than half and increases functional correctness relatively by $3.5\%$ to $5.5\%$ . Additionally, it enhances functionally correct repair of non-compiling code relatively by $37\%$ on average. We further investigate our approach in depth through a runtime analyses and case studies. + +We highlight that our type constraining approach is broadly applicable to any language derivable from the core calculus, any code generation task in these languages, and any LLM utilizing next-token generation. In §6, we envision how our approach can benefit other production-ready languages and closed-weight LLMs. + +Main Contributions. Our main contributions can be summarized as follows: + +- A prefix automaton and a type search algorithm to enable type constraining for LLM-based code generation, demonstrated on a generic, simply-typed core calculus (§3). +- An instantiation and extension of our approach to the popular TypeScript language (§4). +- An extensive evaluation across various LLMs and coding tasks, showing the significant benefit of our approach in reducing compilation errors and increasing functional correctness (§5). 
+ +# 2 Background and Overview + +In this section, we first provide relevant background on LLM-based code generation and constrained decoding. Then, we motivate our type constraining approach using an illustrative example and present a high-level overview of its construction. + +# 2.1 Background on LLM-based Code Generation and Constrained Decoding + +LLM-based Code Generation. LLMs generate code incrementally by sampling one token at a time in an iterative manner, as depicted in Algorithm 1 (without the blue highlights). A user prompt $x$ specifies a code generation task for a trained LLM. At Line 1, the output program $s$ is initialized to an empty string or a program prefix provided in $x$ , e.g., a function signature. At the beginning of each generation iteration (Line 3), the LLM takes as input a concatenation $x \circ s$ of the prompt $x$ and the current partial program $s$ . It then predicts a probability distribution $v$ over a fixed, finite set of tokens, the vocabulary, where each token may be a single Unicode character or a string of multiple characters. All common singleton characters are included in LLMs' vocabulary, ensuring that any standard program can be produced by concatenating tokens [60]. Next, based on distri + +Algorithm 1 Vanilla LLM-based code generation (without the blue highlights) vs. constrained decoding (with the blue highlights) + +Input: LLM, prompt $x$ , completion engine $CE_L$ for language $L$ + +Output: Program $s$ such that $s \in L$ + +1: initialize s +2: while true do +3: $\pmb{v} := \mathrm{LLM}(x \circ s)$ +4: while true do +5: $t\sim \pmb{v}$ +6: if $CE_L(s \circ t)$ then break +7: elif $t = EOS$ and $s \in L$ then break +8: else $\pmb{v}[t] := 0$ ; normalize $\pmb{v}$ +9: if $t = EOS$ then break +10: $s := s \circ t$ +11: return s + +bution $\pmb{v}$ , a token $t$ is sampled (Line 5) and appended to the program $s$ (Line 10). 
This process is repeated until we encounter the special token EOS which signifies the end of the sequence (Line 9). + +LLMs learn to predict adequate probability distributions from extensive training on natural and programming languages [12, 59, 73]. These distributions implicitly encode language rules, allowing LLMs to successfully solve code generation tasks [13, 28, 59]. However, LLMs may fail to infer complex rules [9, 21, 72], derive incomplete rules for less common languages [13, 51], and, due to the probabilistic nature of its generation procedure, not consistently follow formal language rules. + +Constrained Decoding. The aforementioned shortcoming of LLMs can be mitigated by employing constrained decoding, which analyzes the intermediate model outputs $s$ during the generation process and enforces that only valid tokens are incorporated. Specifically, constrained decoding leverages a completion engine $CE_{L}$ , specific to a language $L$ . Computing $CE_{L}(s)$ returns whether partial program $s$ can be completed to a well-formed program in $L$ , meaning whether there exists a (possibly empty) string $s'$ such that $s \circ s' \in L$ . Equivalently, $CE_{L}(s)$ determines whether $s$ belongs to the prefix language $L^{p}$ of $L$ , i.e., whether $s \in L^{p}$ . $L^{p}$ is formally defined as follows: + +DEFINITION 1. For a given language $L$ , its prefix language is $L^p \coloneqq \{s \mid \exists s': s \circ s' \in L\}$ . + +
function is_int(text: string): boolean {<completion>VanillaSyntaxTypesDescription
const num = Number(text);(1) ;acceptrejectrejectSyntactically invalid
return !isNaN(num) &&(2) beracceptacceptrejectUndeclared identifier
parseInt(num <completion>(3) ()acceptacceptrejectDisallowed operator
(4), 10)(5).string()acceptacceptrejectInvalid argument type
acceptacceptacceptWell-formed option
+ +Figure 1. Left is a partial TypeScript program derived from instance #113 of the MBPP benchmark [5], awaiting completion. Right are five completion options: (1)-(4) are invalid and (5) is well-formed. Our type-constrained decoding is the only approach capable of correctly rejecting invalid completions and accepting the valid one. + +As illustrated in blue highlights of Algorithm 1, constrained decoding differs from vanilla LLM-based code generation by adding an additional sample-and-check loop at Line 4 around the token sampling process at Line 5. A sampled token $t$ is considered further only if $s \circ t$ can be completed to a well-formed program (Line 6) or $t$ is EOS and $s$ is already well-formed in $L$ (Line 7). Otherwise, the probability of $t$ is set to zero at Line 8, and the sample-and-check loop repeats. Note that a token $t$ satisfying either Line 6 or Line 7 always exists, because $s$ is in $L^p$ and LLMs' vocabulary contains all common characters. Therefore, the number of iterations of the loop at Line 4 is bounded by the fixed LLM vocabulary size. In practice, only few iterations are needed (\$5.3) and do not require additional LLM inference, ensuring a reasonable runtime overhead compared to vanilla decoding. + +The token-level guarantees extend inductively to guarantee the final program's validity with respect to $L$ . At Line 1, we start with a valid prefix in $L^p$ , i.e., either an empty string or a valid prefix provided in the user prompt. The check at Line 6 ensures that all intermediate outputs $s$ are prefixes in $L^p$ . Additionally, Line 7 and Line 9 ensure that the return statement in Line 11 is reached only if $s \in L$ . As an additional benefit, by steering previously ill-formed generations into well-formed ones, constrained decoding also increases the likelihood of generating functionally correct code. + +Note that commonly used grammar and type checkers can not be used as a completion engine for constrained decoding. 
They judge whether a program string $s$ is well-formed according to the language $L$ , i.e., whether $s \in L$ . When $s$ is not a complete program in $L$ , but a valid prefix in $L^p$ , they return a different output than $CE_L(s)$ , which is not suitable for use in Algorithm 1. + +# 2.2 Overview of Our Type Constraining Approach + +Inadequacy of Syntax-Only Constraining. To apply the constrained decoding algorithm described in §2, one needs to choose a language $L$ and implement the completion engine $CE_{L}$ . Recent work has explored defining $L$ as the set of syntactically valid programs, thus leveraging the syntactic rules of programming languages for constrained decoding [8, 66, 71]. However, the benefits of this approach are limited, because syntax accounts for only a small portion of overall program correctness. For instance, across our evaluations (§5), only $3.5\%$ of the functional errors and $6\%$ of the compilation errors in LLM-generated code are due to syntactic errors. + +We illustrate this limitation using the example in Figure 1. It presents five completion candidates for a partial program: (1)-(4) will lead to compilation errors and only (5) can result in a well-formed program. Based on syntax, completions that contain line terminations or invalid characters (e.g., $) could be rejected (1). However, many other cases, including (2)-(4), do not break syntactic rules but still cause compilation errors. For instance, candidate (2) results in accessing an undeclared identifier. In candidate (3), the function call operator will fail at execution time, as num is a number and can not be called. Candidate (4) passes a value of unexpected format to parseInt, which expects the first argument to be a string. In this example, (4) is generated by CodeLlama 34B [59]. Syntax-only constraining accepts this invalid completion, leading to a non-compilable final output. + +Our Approach: Leveraging the Type System. 
We require stronger constraints to effectively guide the model generation. Beyond syntax, type systems are commonly utilized in compilers, enforcing semantic rules to detect and reject bugs at compile time [23]. For Figure 1, the TypeScript type system would correctly reject code containing erroneous completions (2)-(4). Therefore, in this work, we propose leveraging type systems in constrained decoding to guide code generation. Our method accurately detects that only candidate (5) is a valid completion, guiding CodeLlama 34B to adopt this option and complete the program correctly. As detailed in §5, our experimental results demonstrate that our approach more than halves compiler errors in generated code and consistently increases the proportion of functionally correct programs. + +Incorporating typing rules into code generation offers substantial potential but presents a significant challenge. Previous research has focused primarily on constrained decoding for context-free languages, for which prefixes can be efficiently determined [8, 66, 71]. Type systems, however, require language specifications that exceed the capabilities of context-free grammars [43], inhibiting the direct application of prior techniques to type-constrained decoding. Moreover, determining whether a partially generated expression can be completed to be a well-typed full expression involves not only type checking and inference, as done in traditional compilers, but also addressing type inhabitation [39, 67]. + +To address these challenges, we design and implement a practical approach to determine whether a string can be completed to a well-typed program. We begin by developing a specialized kind of non-deterministic automaton that maintains a prefix property, formally defined in §3.2. This property ensures that every reachable state can lead to an accepting state. We leverage this property to build a completion engine for constrained decoding as in Algorithm 1. 
We construct such a completion engine to enforce well-typedness for a simply-typed language $L_{B}$ in §3.3-§3.5 and extend it to a core subset of TypeScript in §4. At a high level, the automaton acts as a syntactic parser, additionally maintaining information about initialized variables, enclosing function declarations, and other type-related aspects of the partially parsed syntax tree. This is possible through dynamically created + +annotated states that track the additional information. + +In Figure 2, we provide a concrete example for our prefix automata. Every state represents the currently parsed syntactic component and additionally tracks the surrounding typing information. For example, after parsing the partial program in Figure 1, the automaton currently parses an expression as the first argument to function parseInt. Transitions are annotated with further code completions that are deemed admissible based on the syntax and typing information. In the first state, the automaton has parsed num, inferring from previous declarations that it represents an identifier of type number. Based on the signature of the parseInt function call, the required type of the completed + +![](images/97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg) +Figure 2. An example of a prefix automaton. + +argument is string. The automaton now determines the admissible transitions from the identifier state. State transitions corresponding to completions (1)-(4) from Figure 1 are disallowed, as they are determined to violate type rules based on the tracked type information. Further, the automaton needs to determine which operations on the current expression num of type number can be applied to obtain an expression of type string. To achieve this, we develop a type reachability search algorithm, which finds string-typed expressions num.toString() and num.isFinite(). toString(). 
Therefore, it returns that accesses to members .ToString and .isFinite are admissible, resulting in the two depicted transitions with the corresponding labels. In our experiment, CodeLlama 34B chooses to transition along .ToString(), the more likely completion based on its training data. Note that in our actual automaton formalism, as described at the end of §3.2, state transitions are + +on a character level. Figure 2 condenses character-level transitions into string-level transitions for presentation purposes. + +The type reachability algorithm seeks to identify sequences of operators applied to a given expression such that the resulting expression possesses a required type. Conceptually, it performs a search over an abstracted type graph, whose nodes are types, and edges represent well-typed operations connecting the input and output types. An example of such a (partial) graph is shown in Figure 3, with a valid path highlighted in green color. Starting from the derived number type of num, the search first traverses a member access edge to reach the nullary function type () => string. Then, it traverses an edge representing a function call to reach the goal type string, concluding that the combination of traversed operators . toString() is a well-formed + +completion for Figure 1. The path for num.isFinite().ToString() is analogously valid but omitted in Figure 3 for brevity. This type reachability search is invoked every time a partial expression is parsed, in order to determine valid transitions in the prefix automaton. + +![](images/7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg) +Figure 3. An example of a partial type search graph. + +We implement our approach for a significant subset of TypeScript (§4) and experimentally evaluate it for various LLMs and three important code generation tasks: synthesis, translation, and repair (§5). 
The results demonstrate that our approach provides significant benefits in both reducing compilation errors for LLM-generated code and increasing their functional correctness. + +# 3 Our Type Constraining Approach + +In this section, we first present a generic, simply-typed language $L_{B}$ (§3.1). Then, we present our type constraining approach using $L_{B}$ . Specifically, we introduce our prefix automaton formalism (§3.2) and define increasingly complex automata for parsing well-typed fragments of $L_{B}$ , beginning with identifiers, literals, and types (§3.3), continuing to expressions, including type search for type-restricted expressions (§3.4), and concluding with statements (§3.5). + +# 3.1 A SimplyTyped Language + +We define a simply typed, Turing-complete language, $L_{B}$ . Its grammar and type system are generic, resembling the principles found in popular statically typed languages, such as TypeScript, Java, and Go. However, there may be a slight bias towards TypeScript, as our implementation is based on it. + +Syntax. The syntax of $L_{B}$ is shown in Figure 4. The language includes expressions, type-annotated variable and function definitions, and control flows. Overall, it is based on a core subset of TypeScript [10] but can be adapted for other statically typed languages. Similar to Bierman et al. [10], we represent Kleene-Star repetitions using an overline, e.g., $\overline{s}$ represents a sequence of statements $s$ , and adhere to the TypeScript documentation to annotate parameter types in function signatures with argument names [17]. We make a distinction between base and extension expressions. The latter applies operators to previous expressions, leading to more complex expressions. This differentiation is useful later in §3.4 for constructing the prefix automaton for parsing expressions. + +Expression Typing Rules. The typing rules for $L_B$ 's expressions are detailed in Figure 5. 
These rules form a subset of safeFTS, a type-safe portion of TypeScript described by Bierman et al. [10], allowing us to leverage their soundness results. The type rules for $L_B$ use the standard concept of a type environment, denoted as $\Gamma$ , which is a collection of pairs $(x : T)$ of identifiers $x$ and types $T$ . We write $\Gamma \vdash e : T$ if the expression $e$ has type $T$ in the type environment $\Gamma$ . An expression $e$ is considered valid if its type can be derived by applying the given typing rules. + +
l ::=Literalp ::= x : TTyped Identifier
\d+Numeric Literal
"\"w*”String LiteralT ::=Type
true | falseBoolean LiteralnumberNumeric Type
stringString Type
x ::= \w+IdentifierbooleanBoolean Type
(¯p) => TFunction Type
e ::= e0 | e1Expression
e0 ::=Base Expressions ::=Statement
lLiterallet x : T;Variable Declaration
xIdentifiere;Expression Statement
(¯p) => eFunction Expressionreturn e;Return Statement
(e)Grouped Expression{¯s}Statement Block
e1 ::=Extension Expressionfunction x (¯p) : T {¯s}Function Definition
e ⊙ eBinary Operatorif (e) s else sIf-Then-Else Statement
e(¯e)Function Call
e.nMember AccessM ::=¯sProgram
+ +Figure 4. The syntax of $L_{B}$ . Expressions are categorized into base and extension expressions. The later extends a given expression with suffix operators to form more complicated expressions. + +_literals are evaluated to their respective types (LIT - {NUM, STR, BOOL}). Identifiers $x$ are evaluated based on the corresponding type in the type environment (IDENT). Anonymous functions are typed according to their annotated parameter types, with the return type determined by the returned expression (ANON). Grouping preserves the type of the inner expression (GROUP). + +Binary operators have predefined signatures $S_{1} \odot S_{2}: T$ , such as number + number : number for addition and $T = T: T$ for assignments. These signatures must be satisfied in well-typed expressions (op). Function calls require parameters to match the function signature (CALL). The type of member accesses $e.n$ is determined using an auxiliary function LOOKUP(S, n), which fetches the type of member $n$ for type $S$ . An instantiation of LOOKUP for TypeScript is provided by Bierman et al. [10]. + +Statements and Type Environments. The typing rules for statements are presented in Figure 6. Type environments are modified by statements, in particular variable declarations and function definitions. We use the notation $\Gamma_1 \vdash s \rightarrow \Gamma_2$ to indicate that after executing statement $s$ in type environment $\Gamma_1$ , the new environment is $\Gamma_2$ . + +Variable declarations introduce the identifier with declared type into the type environment, provided the identifier is not already defined (DECL). The type environment defines the context to evaluate expressions (EXPR) and return statements (RET). Return statements are only well-typed inside function bodies. The statements inside statement blocks and if-then-else statements must maintain valid type environments, but do not have an external effect (BLOCK, ITE). 
This also applies to function definitions; however, the defined function is finally added to the external type environment (FUN). Lastly, empty statements do not alter the type environment (NOP), while statement sequences propagate the type environment along the execution (SEQ). + +Return Types. The rules for checking return types are presented in Figure 7. Firstly, return statements must contain expressions matching the function's declared return type. Secondly, such an expression must be returned on every execution path. We use the notation $\Gamma \vdash \overline{s} : R$ to indicate the sequence of statements $\overline{s}$ ensures a return value of type $R$ . + +For variable declarations and expression statements, the return type of the subsequent statements is considered (R-DECL, R-EXPR). The return type of a return statement directly corresponds to the + +$$ +\left[ \mathrm {L I T - N U M} \right] \frac {}{\Gamma \vdash \backslash d + : n u m b e r} \quad \left[ \mathrm {L I T - S T R} \right] \frac {}{\Gamma \vdash " \backslash w * " : s t r i n g} \quad \left[ \mathrm {L I T - B O O L} \right] \frac {}{\Gamma \vdash \text {t r u e , f a l s e : b o o l e a n}} +$$ + +$$ +\left[ \mathrm {I D E N T} \right] \frac {(x : T) \in \Gamma}{\Gamma \vdash x : T} \quad \left[ \mathrm {A N O N} \right] \frac {\Gamma \cup \overline {{p}} \vdash e : T}{\Gamma \vdash (\overline {{p}}) \Rightarrow e : (\overline {{p}}) \Rightarrow T} \quad \left[ \mathrm {C A L L} \right] \frac {\Gamma \vdash f : (\overline {{x}} : \overline {{S}}) \Rightarrow T \quad \Gamma \vdash \overline {{e}} : \overline {{S}}}{\Gamma \vdash f (\overline {{e}}) : T} +$$ + +$$ +\left[ \mathrm {G R O U P} \right] \frac {\Gamma \vdash e : T}{\Gamma \vdash (e) : T} \quad \left[ \mathrm {O P} \right] \frac {\Gamma \vdash e _ {1} : S _ {1} \quad \Gamma \vdash e _ {2} : S _ {2} \quad S _ {1} \odot S _ {2} : T}{\Gamma \vdash e _ {1} \odot e _ {2} : T} \quad \left[ \mathrm {M E M} \right] \frac {\Gamma \vdash 
e : S \quad \text {L O O K U P} (S , n) = T}{\Gamma \vdash e . n : T} +$$ + +Figure 5. Typing rules for $L_B$ 's expressions. + +$$ +\left[ \mathrm {D E C L} \right] \frac {x \notin \Gamma}{\Gamma \vdash \operatorname {l e t} x : T ; \rightarrow \Gamma \cup (x : T)} \quad \left[ \mathrm {E X P R} \right] \frac {\Gamma \vdash e : T}{\Gamma \vdash e ; \rightarrow \Gamma} \quad \left[ \mathrm {R E T} \right] \frac {\text {i n s i d e f u n c t i o n b o d y} \quad \Gamma \vdash e : T}{\Gamma \vdash \operatorname {r e t u r n} e ; \rightarrow \Gamma} +$$ + +$$ +\left[ \mathrm {B L O C K} \right] \frac {\Gamma \vdash \overline {{s _ {B}}} \leftrightarrow \Gamma_ {B}}{\Gamma \vdash \{\overline {{s _ {B}}} \} \nrightarrow \Gamma} \quad \left[ \mathrm {F U N} \right] \frac {x \notin \Gamma \qquad \Gamma \cup (x : (\overline {{p}}) = > T) \cup (\overline {{p}}) \vdash \overline {{s _ {x}}} \nrightarrow \Gamma_ {x}}{\Gamma_ {1} \vdash \text {f u n c t i o n} x (\overline {{p}}) : T \{\overline {{s _ {x}}} \} \nrightarrow \Gamma \cup (x : (\overline {{p}}) = > T)} +$$ + +$$ +\left[ \mathrm {I T E} \right] \frac {\Gamma \vdash s _ {i f} \rightarrow \Gamma_ {i f} \quad \Gamma \vdash s _ {e l s e} \leftrightarrow \Gamma_ {e l s e}}{\Gamma \vdash \mathrm {i f} (e) s _ {i f} \mathrm {e l s e} s _ {e l s e} \leftrightarrow \Gamma} \quad \left[ \mathrm {N O P} \right] \frac {}{\Gamma \vdash \bullet \rightarrow \Gamma} \quad \left[ \mathrm {S E Q} \right] \frac {\Gamma_ {1} \vdash \bar {s} \leftrightarrow \Gamma_ {2} \quad \Gamma_ {2} \vdash s \leftrightarrow \Gamma_ {3}}{\Gamma_ {1} \vdash \bar {s} s \leftrightarrow \Gamma_ {3}} +$$ + +Figure 6. Type environment extension rules for sequences of statements in $L_{B}$ . 
+ +$$ +\left[ R - D E C L \right] \frac {\Gamma \vdash \bar {s} : R}{\Gamma \vdash \operatorname {l e t} x : T ; \bar {s} : R} \quad \left[ R - E X P R \right] \frac {\Gamma \vdash \bar {s} : R}{\Gamma \vdash e ; \bar {s} : R} \quad \left[ R - R E T \right] \frac {\Gamma \vdash e : R}{\Gamma \vdash \operatorname {r e t u r n} e ; \bar {s} : R} +$$ + +$$ +\left[ \mathrm {R - B L O C K - S E L F} \right] \frac {\Gamma \vdash \overline {{s _ {B}}} : R \quad \Gamma \vdash \overline {{s}}}{\Gamma \vdash \{\overline {{s _ {B}}} \} \overline {{s}} : R} \quad \left[ \mathrm {R - B L O C K - N E X T} \right] \frac {\Gamma \vdash \overline {{s _ {B}}} \quad \Gamma \vdash \overline {{s}} : R}{\Gamma \vdash \{\overline {{s _ {B}}} \} \overline {{s}} : R} +$$ + +$$ +\left[ \mathrm {R - F U N} \right] \frac {\Gamma \cup (x : (\bar {p} \Rightarrow R)) \vdash \bar {s} : R ^ {\prime} \quad \Gamma \cup (x : (\bar {p}) \Rightarrow R) \cup (\bar {p}) \vdash \bar {s _ {x}} : R}{\Gamma \vdash \text {f u n c t i o n} x (\bar {p}): R \{\bar {s _ {x}} \} \bar {s} : R ^ {\prime}} +$$ + +$$ +\left[ \mathrm {R - I T E - S E L F} \right] \frac {\Gamma \vdash s _ {i f} : R \quad \Gamma \vdash s _ {e l s e} : R}{\Gamma \vdash \mathrm {i f} (e) s _ {i f} \mathrm {e l s e} s _ {e l s e} \bar {s} : R} \quad \left[ \mathrm {R - I T E - N E X T} \right] \frac {\Gamma \vdash \bar {s} : R}{\Gamma \vdash \mathrm {i f} (e) s _ {i f} \mathrm {e l s e} s _ {e l s e} \bar {s} : R} +$$ + +Figure 7. $L_{B}$ 's typing rules for function returns. + +type of the returned expression (R-RET). For statement blocks, the return type is decided by either the block itself or the subsequent statements (R-BLOCK-SELF, R-BLOCK-NEXT). In function definitions, the return type is determined by the type of the subsequent statements, similar to expression statements. It is additionally required that the function body returns a type matching the declared return type (R-FUN). 
For if-then-else statements, both branches must return the same type (R-ITE-SELF), or the return type is determined by the following statements (R-ITE-NEXT). + +Language Definition. In summary, a program $s$ is in language $L_{B}$ if both (i) $s$ conform to the grammar in Figure 4 and (ii) $s$ is well-typed according to the typing rules in Figures 5-7. + +# 3.2 Prefix Automaton Definition + +We introduce a general definition of prefix automata, beginning with basic automata concepts. Prefix automata are standard automata that ensure a special prefix property2. This property enables us to use a prefix automaton to decide whether some string is in the prefix language $L^p$ of a given language $L$ . That is, the prefix automaton can function as a completion engine $CE_L$ to facilitate constrained decoding, as described in §2. + +We consider an automaton $A \coloneqq \langle \Sigma, Q, \delta, I, F \rangle$ , a tuple of the five following elements: (i) $\Sigma$ is an alphabet of input symbols; (ii) $Q$ is a set of states; (iii) $\delta : Q \times \Sigma \mapsto \mathcal{P}(Q)$ is a computable transition function that maps a state and an input symbol to a finite set of next states; (iv) $I \subseteq Q$ is a finite set of initial states; and (v) $F \subseteq Q$ is a decidable set of accepting states. As a convention, we denote a symbol in $\Sigma$ as $c$ , a string of symbols in $\Sigma^*$ as $s$ , the empty string as $\varepsilon$ and an operator for concatenating symbols and strings as $\circ$ . The transition function $\delta$ maps a given state to all possible subsequent states. When $\delta$ is applied on a set of states $\mathbf{q} \subseteq Q$ , we take the union of the results as output, i.e., $\delta(\mathbf{q}, c) \coloneqq \bigcup_{q \in \mathbf{q}} \delta(q, c)$ . The transition function defines a directed graph $G$ over $Q$ , where every state is a node and there is an edge annotated with $c$ from $q$ to $q'$ if $q' \in \delta(q, c)$ . 
The language parsed by $A$ comprises all strings $s$ such that traversing $G$ from some initial state in $I$ along the edges annotated with $c_i$ for $c_1 \circ c_2 \circ \ldots \circ c_n = s$ , it is possible to reach some accepting state in $F$ . Formally, we define recursively a traversal function $\gamma$ for states $\mathbf{q}$ as $\gamma(\mathbf{q}, \varepsilon) \coloneqq \mathbf{q}$ and $\gamma(\mathbf{q}, s \circ c) \coloneqq \delta(\gamma(\mathbf{q}, s), c)$ . The language accepted by $A$ is then defined as $L(A) \coloneqq \{s \mid \gamma(I, s) \cap F \neq \emptyset\}$ . The traversal function has two intuitive properties concerning reachability that can be shown inductively: + +(P1) A path along the graph can be split arbitrarily, i.e., $\gamma (\mathbf{q},s\circ s^{\prime}) = \gamma (\gamma (\mathbf{q},s),s^{\prime})$ +(P2) If a state is reached by $s \circ s'$ , some state is reachable by $s$ , i.e., $\gamma(\mathbf{q}, s \circ s') \neq \emptyset \Rightarrow \gamma(\mathbf{q}, s) \neq \emptyset$ . + +An automaton satisfies the prefix property or is a prefix automaton, if there is a path from every reachable state to some accepting state, or formally: + +DEFINITION 2. For an automaton $A$ , the prefix property holds iff $\forall q \in \gamma(I, s) : \exists s' : \gamma(q, s') \cap F \neq \emptyset$ . The automaton is a prefix automaton if it satisfies the prefix property. + +Intuitively, for such automata, reaching some state by consuming string $s$ implies that $s$ is a prefix to some member of $L(A)$ . We define the reachable language of $A$ , all inputs that result in some state, as $L_r(A) := \{s \mid \gamma(I, s) \neq \emptyset\}$ . Below, we establish the equivalence of $L_r(A)$ and $L(A)^p$ , the prefix language of $L(A)$ as defined in Definition 1. + +LEMMA 1. If $A$ is a prefix automaton, then $L(A)^p = L_r(A)$ . + +Proof. For any $s \in L(A)^p$ there exists $s'$ such that $s \circ s' \in L(A)$ , by the definition of prefix languages. 
By the definition of $L(A)$ , this implies $\gamma(I, s \circ s') \neq \emptyset$ . Then, using (P2), we further derive $\gamma(I, s) \neq \emptyset$ , i.e., $s \in L_r(A)$ . Therefore, $L(A)^p \subseteq L_r(A)$ holds. The other direction also holds. We first see that $s \in L_r(A) \implies \gamma(I, s) \neq \emptyset$ . Then applying Definition 2 and (P1), we find $\exists s': \gamma(I, s \circ s') \cap F \neq \emptyset$ , implying $s \circ s' \in L(A)$ and thus $s \in L(A)^p$ . + +Note that $L(A)^P \subseteq L_r(A)$ holds generally for automata, since the first half of the proof does not require the prefix property. + +From Prefix Automata to Completion Engines. With Lemma 1, given a prefix automaton $A$ , we can define a convenient-to-compute completion engine for the underlying language $L(A)$ : $CE_{L(A)}(s) \coloneqq \gamma(I, s) \neq \emptyset$ . Since our target language is $L$ and not $L(A)$ , we now need to determine the relationship between $L(A)$ and $L$ . If we construct $A$ such that it parses a subset of $L$ , i.e., $L(A) \subseteq L$ , we are guaranteed that all LLM generations constrained by $CE_{L(A)}$ lie in $L$ . Conversely, if $L(A) \supseteq L$ , + +we are guaranteed that every string in $L$ can be expressed under constrained decoding, but not that every generation is valid. For example, if $A$ permits all syntactically correct programs, it guarantees that all well-typed programs can be generated, but permits ill-typed programs as well. Therefore, $L(A) \subseteq L$ is required to achieve our goal of enforcing well-typedness on LLM-generated code. Ideally, $A$ would parse $L$ exactly, i.e., $L(A) = L$ , which in our setting additionally guarantees that every well-typed program can be expressed under the constraints of the completion engine. If this is not achieved, it is important for $A$ to capture a large subset of $L$ to be practically useful. + +Building a Prefix Automaton for $L_B$ : Warming up. 
In the next sections, we will construct a prefix automaton for soundly parsing well-typed programs in $L_B$ , by presenting various prefix automata for well-typed fragments of $L_B$ . Our final automaton will cover a significant but incomplete subset of $L_B$ . Incompleteness exists because to ensure that our algorithms terminate, we do not cover high-order types that are less likely to occur in practice. This is discussed in more detail in §3.4. Our evaluation in §5 empirically demonstrates that our approach sufficiently covers practical use cases to significantly improve the correctness of LLM-generated code. + +We choose $\Sigma$ to be the set of Unicode characters. This makes our completion engine agnostic to LLM vocabularies. Even though LLMs' vocabularies differ, their tokens are always a string of single or multiple characters. When our completion engine for $L_{B}$ is called during constrained decoding, i.e., at Line 6 of Algorithm 1, it processes the sampled token character by character. + +Before proceeding, we briefly introduce several base prefix automata below, with their precise definitions detailed in Appendix A.1. These automata are later combined, with parts of the transition function being overwritten, to construct more complex automata that capture elements of $L_{B}$ . + +- Union $A_X \cup A_Y$ parses the language $\{s \mid s \in L(A_X) \cup L(A_Y)\}$ . It is a prefix automaton if both $A_X$ and $A_Y$ are prefix automata. +- Concatenation $A_{XY}$ parses the language $\{s \circ s' \mid s \in L(A_X), s' \in L(A_Y)\}$ . It is a prefix automaton if $A_X$ and $A_Y$ are both prefix automata, and $L(A_Y) \neq \emptyset$ . +- Kleene-Star $A_{\overline{X}}$ parses the language $\{\overline{s} \mid s \in L(A_X)\}$ . It is a prefix automaton if $A_X$ is a prefix automaton. +- Terminal $A_{\mathsf{S}}$ parses the language $\{\mathsf{S}\}$ , where $\mathsf{S}$ is a fixed, non-empty string. 
+- Empty $A_{\emptyset}$ parses the empty language $\varnothing$ and is always a prefix automaton. + +# 3.3 Prefix Automata for Identifiers, Literals, and Types + +We now introduce prefix automata for basic syntactic elements of $L_{B}$ : identifiers, literals, and type annotations. The languages parsed by these automata exactly match their counterparts in $L_{B}$ . + +**Literals.** The prefix automaton for literals $A_{I} \coloneqq A_{\mathrm{NUM}} \cup A_{\mathrm{STR}} \cup A_{\mathrm{BOOL}}$ accepts number, string, and boolean literals as defined in Figure 4. The automata $A_{\mathrm{NUM}}, A_{\mathrm{STR}}$ , and $A_{\mathrm{BOOL}}$ are defined by the deterministic finite automaton representation of the corresponding regular expression of the literal. To ensure the prefix property on the finite automata of the regular expression, we prune states from which accepting states can not be reached. + +Identifiers. During parsing, we maintain the current type environment $\Gamma$ , as detailed in §3.5. We define the identifier automaton $A_{x}$ as the union of the terminal automata for identifiers defined in $\Gamma$ . In other words, $A_{x} := \bigcup_{y \in \Gamma} A_{y}$ . + +Types. The type automaton $A_{T}$ accepts type annotations as defined in the grammar of $L_{B}$ (Figure 4). It is defined as $A_{T} := A_{\mathrm{TYPE - LIT}} \cup A_{\mathrm{TYPE - FUN}}$ . This includes type literal automaton $A_{\mathrm{TYPE - LIT}} := A_{\mathrm{string}} \cup A_{\mathrm{number}} \cup A_{\mathrm{boolean}}$ and function type automaton $A_{\mathrm{TYPE - FUN}} := A_{(\overline{p})} \Rightarrow T$ . The latter is a concatenation of multiple prefix automata, with the parameter and return types recursing on $A_{T}$ . This recursive + +definition is valid, since it ensures a finite set of initial states, defines a decidable accepting set, and preserves the prefix property. 
+ +# 3.4 Prefix Automaton for Expressions + +We introduce prefix automata to parse well-typed expressions in $L_{B}$ . We begin by describing an automaton $A_{e}$ to parse expressions whose types are unrestricted, e.g., any expression $e$ in an expression statement $e$ ;. Then, we present an automaton $A_{e} \downarrow T$ for expressions whose type is constrained to $T$ , e.g., for parameters of function calls. The type-constrained version accepts a string only if the inhabited type of the represented expression matches $T$ . To preserve the prefix property, we need to ensure that partial expressions can be completed to inhabit the constrained type. Completions may involve arbitrarily many applications of operators, which may modify the expression type. We therefore introduce a type search algorithm that soundly determines which types an expression can inhabit, and use it to prune transitions that violate the prefix property. + +Unrestricted Expressions. To handle the recursive syntactic structure of expressions, we differentiate two kinds as shown in Figure 4: base expressions, including identifiers, literals, grouped expressions, and anonymous functions, and extension expressions, which are operator applications (binary operator, member access, or function call) that lead to extending a given expression. + +The expression automaton $A_{e}$ is thus defined as the union of base expression automata $A_{x}, A_{l}, A_{(e)}$ , and $A_{(\overline{p})} \Rightarrow e$ , with potential extensions $A_{\odot e}, A_{.n}$ , and $A_{(\overline{e})}$ . The individual base and extension automata are constructed by concatenating the respective terminal automata and recursively $A_{e}$ . Additionally, we restrict the type of the recursive $A_{e}$ if the restriction is required by the type system, e.g., for parsing call parameters with a fixed type. We provide additional details on this restriction in Appendix A.2. 
Since an expression can end after either base or extensions, accepting states of both base and extending automata are accepting states of $A_{e}$ . To implement extensions, we start from the base expression automata and recursively adjust $A_{e}$ 's transition function $\delta_{e}$ by adding outgoing edges from the accepting states of the current automaton to the initial states of the extending automata, or formally: + +$$ +\forall X, Y: \delta_ {e} (q _ {Y} ^ {X}, c) := \left\{ \begin{array}{l l} \delta_ {Y} (q _ {Y} ^ {X}, c) \cup \delta_ {e} (I _ {(\overline {{e}})} ^ {X}, c) \cup \delta_ {e} (I _ {\odot e} ^ {X}, c) \cup \delta_ {e} (I _ {. n} ^ {X}, c) & \text {i f q _ {Y} ^ {X} \in F _ {Y}} \\ \delta_ {Y} (q _ {Y} ^ {X}, c) & \text {o t h e r w i s e ,} \end{array} \right. +$$ + +where the labels $X$ and $Y$ for a state $q_{Y}^{X}$ represent that a string $X$ has been parsed, and currently the active automaton is $A_{Y}$ , which can be one of the following: $A_{x}, A_{l}, A_{(e)}, A_{(\overline{p})} \Rightarrow e, A_{\odot e}, A_{.n}$ , and $A_{(\overline{e})}$ . The superscripts are useful for tracking the currently expressed type, enabling us to determine the validity of extensions and transition to type-restricted expressions based on $L_{B}$ 's typing rules. For instance, for state $q^{42}$ , the addition operator extension $+e$ and function call extension $(\overline{e})$ are syntactically applicable to 42 of type number. While the addition operator with type signature number + number :number is allowed, we can not apply a function call on number. In general, we set $I_{Y}^{X} := \emptyset$ when $Y$ is an invalid extension to $X$ . Moreover, for the extension $+e$ to be valid, $e$ must be of type number. To this end, we transition to a type-restricted expression automaton by setting $I_{+e}^{42}$ to the set of initial states for $A_{+} \circ (A_{e} \downarrow \text{number})$ . 
Similar to the recursive type automaton, our definition of $A_{e}$ ensures a finite set of initial states and a decidable accepting set. + +Type-Constrained Expressions. To implement $A_{e} \downarrow T$ , we must determine whether a partial expression $s$ can be completed to inhabit type $T$ . Completing $s$ without any extension can lead to a possible set of types and repeated extensions can further alter the result type, but we are not guaranteed that the desired type can be reached. Moreover, extensions can be applied indefinitely, prohibiting an exhaustive search of possible completions. + +We therefore develop a two-tiered algorithm, which we describe in the following paragraphs. This algorithm first identifies the derivable types DERIVABLE $(q_{s})$ of $s$ based on its current state $q_{s}$ . DERIVABLE $(q_{s})$ refers to the set of inhabitable types for all possible expressions completed from $s$ without extension. Second, a type reachability search REACHABLE $(\text{DERIVABLE}(q_{e}), T)$ is performed to determine if $T$ can be inhabited by extending from the derivable types of $s$ . + +We prune automaton transitions when this type search returns a negative result. To ensure the prefix property, the performed search is sound, i.e., it only returns a positive result if $T$ can be expressed by a valid sequence of extensions. This also aligns with our goal of generating only well-typed programs, ensuring that our expression automata accept a subset of all well-typed expressions of $L_{B}$ . To ensure termination, the search is incomplete, i.e., there may be a valid sequence of transitions to express $T$ which is not found by the search and we may end up disallowing generation of a well-typed expression. However, it only avoids traversing types of high complexity that are less likely to occur in practice. We further empirically ensure that our approach is practically effective (§5). + +Derivable Types. 
For the first part of the algorithm, we determine all types inhabitable by the currently parsed expression $s$ without extension, i.e., $\mathrm{DERIVABLE}(q_s)$ . For example, while parsing partial identifier $x$ in the type environment $\Gamma := \{(x : \text{number}), (xy : \text{string})\}$ , we have $\mathrm{DERIVABLE}(q_x) = \{\text{number}, \text{string}\}$ and $\mathrm{DERIVABLE}(q_{xy}) = \{\text{string}\}$ . For a final state $q$ of expression $e$ , we define $\mathrm{DERIVABLE}(q) := T$ , where $\Gamma \vdash e : T$ . Different expressions impose different + +rules on derivability, and we present the detailed rules in Table 1. Note that for grouped expressions and function literals, we need to enumerate reachable types by recursively contained expressions. To avoid explicitly enumerating all reachable types, we integrate the derivability and reachability algorithms. This optimization is discussed in more detail in Appendix A.4. + +LEMMA 2. For state $q \in \gamma(I_e, s)$ of partial expression $s$ , DERIVABLE( $q$ ) returns all $T$ s.t. exists some suffix $s'$ with $\Gamma \vdash s \circ s': T$ and $s'$ does not involve an extension (operator, call, or member access). + +PROOF. By case distinction on the possible states of partial expressions. + +Type Reachability. To determine which types are inhabitable by extending a base expression $e$ of a given type $T$ (with binary operator, function call, or member access), we analyze sequences of single extension steps with compatible signatures. This process is conceptualized as a search over a graph where types are nodes and extension steps are edges. For every binary operator $\odot$ with the signature $T \odot X : S$ , an edge is created from type $T$ to type $S$ . As an example, the operator for numerical addition $+$ has the signature number $+$ number: number, thereby forming an edge from number to itself. 
Furthermore, for every member $n$ of type $T$ , we create an edge from $T$ to $\text{LOOKUP}(T, n)$ , e.g., from number to $() =>$ string for the member to string of number type. Finally, we connect each function type $(\overline{p}) => R$ and with its return type $R$ . For instance, $() =>$ string is connected with string. Examples of type graphs can be found in §2.2 and Figure 3. Note that these extension steps are abstract, in the sense that they focus on the type of the expression being extended and the resulting type after extension, not considering textual representation and parameters. + +Table 1. Definition of DERIVABLE(x) for partial expressions introduced in Figure 4. $s \leq s'$ expresses that $s$ is a prefix of $s'$ . pmatch(s, T) determines whether a prefix $s$ partially matches the regular expression of literals of type $T$ . + +
sDERIVABLE(qs)
l{T | pmatch(l,T),T ∈ {number, string, boolean}}
x{T | x ≤ n, (n : T) ∈ Γ}
(¯p) => e{ (¯p) => T | REACHABLE(DERIVABLE(qe),T)}
(e{T | REACHABLE(DERIVABLE(qe),T)}
e ⊙{T | ∃S': Γ ⊢ e : S ∧ S ⊙ S': T}
e({R | Γ ⊢ e: (¯p) => R}
e.a{S | a ≤ n, Γ ⊢ e : T, LOOKUP(T,n) = S}
+ +
Algorithm 2 Our type reachability search algorithm
Input: Current type T of some expression e, goal type G
Output: Whether G can be reached by extending e
1: function REACHABLE(T, G)
2: if T = G then return true▷ The goal type is successfully found
3: if T is marked then return false else mark T▷ Type T is marked to avoid cycles
4: for each valid extension step ⋄ from T do
5: S := the resulting type of applying ⋄ on T
6: if PRUNESEARCH(T, G, S) then continue▷ Prune the search to ensure termination
7: if REACHABLE(S, G) then return true▷ Recurse to the next round of extension
8: return false▷ No suitable extension is found
+ +The type reachability algorithm, Algorithm 2, implements a depth-first search over this type graph, starting from the current type $T$ , succeeding upon finding goal type $G$ (Line 2), marking any visited types to prevent cycles (Line 3). Then, it proceeds to iterate over all valid extension steps from $T$ (Line 4) and computes the resulting type $S$ after the extension step is applied (Line 5). In the conceptualized type graph, as described in the previous paragraph, this is equivalent to exploring all outgoing edges from $T$ . At Line 7, we proceed to recursively search if $S$ can reach $G$ . If all recursive calls are unsuccessful, the goal type can not be reached (Line 8). + +Some programming languages define self-referential default members, e.g., clone in Java or valueOf in TypeScript, which are nullary functions that return a value of the same type as the callee, $() \Rightarrow T$ for type $T$ . When these members are accessed in functions, higher-order functions can be derived indefinitely. For instance, for a function $f$ with type $() \Rightarrow S$ , $f.$ valueOf has the type $() \Rightarrow () \Rightarrow S$ . We therefore need to restrict the type search to a finite set of types to ensure termination. At Line 6 of Algorithm 2, we add a heuristic PRUNESEARCH into the search, which decides where to prune the search process. We develop a simple heuristic based on the results from Gvero et al. [30]. This heuristic prunes exploration of types with higher complexity than goal or source type if they do not contain yet unexplored primitive types, thus preventing exploration of arbitrarily complex types. The details of this heuristic are presented in Appendix A.3. While ensuring termination, our heuristic leads to incompleteness and the potential rejection of well-typed expressions.
However, this effect is less pronounced in practical usage, as only highly complex (thus less realistically used) types are avoided. + +We proceed to prove the soundness of Algorithm 2 below. + +LEMMA 3. The type search in Algorithm 2 is sound, i.e., for any expression $e$ with $\Gamma \vdash e : T$ , if REACHABLE(T,G) holds, then there exists a sequence of extensions $y$ such that $\Gamma \vdash e \circ y : G$ . + +Proof. By the design of Algorithm 2, if REACHABLE $(T,G)$ returns true, there is a sequence of $n$ recursive calls to REACHABLE $(T_i,G)$ , with $T_0 = T$ and REACHABLE $(T_n,G) = \text{true}$ . Each $T_i$ ( $i > 0$ ) is derived because some extension $\diamond_i$ is applicable to $T_{i-1}$ based on the typing rules of $L_B$ . We then convert each $\diamond_i$ to its concrete, textual version $\spadesuit_i$ . This representation includes the required well-typed parameters of $\spadesuit_i$ (i.e., for binary operators and non-nullary functions), which are constructed using literals. Finally, we construct $y$ as $\spadesuit_1 \circ \ldots \circ \spadesuit_n$ . + +Note that using any pruning heuristic at Line 6 of Algorithm 2 preserves soundness, which in turn is sufficient to preserve the required prefix property, as defined in Definition 2. We can conclude that the two-tiered search algorithm soundly determines whether the desired target type can be derived from some partial input. Therefore, we conclude that $A_{e} \downarrow T$ and $A_{e}$ are prefix automata that parse a subset of well-typed expressions in $L_{B}$ . + +COROLLARY 4. If REACHABLE( DERIVABLE(q), G) holds for any $q \in \gamma(I_e, s)$ of a partial expression $s$ , then there exists a suffix $s'$ such that $\Gamma \vdash s \circ s': G$ . + +Proof. This conclusion follows directly from Lemmas 2 and 3. + +LEMMA 5. The language parsed by $A_e \downarrow T$ is thus a subset of the expressions of $L_B$ of type $T$ , i.e., $L(A_e \downarrow T) \subseteq \{s \mid \Gamma \vdash s : T\}$ . 
Since $A_e$ recursively involves $A_e \downarrow T$ , the language parsed by $A_e$ is also a subset of well-typed expressions of $L_B$ , i.e., $L(A_e) \subseteq \{s \mid \exists T : \Gamma \vdash s : T\}$ . + +# 3.5 Prefix Automata for Statements + +We define the remaining automata to capture the complete language $L_{B}$ . The statement automaton is defined recursively as $A_{s} \coloneqq A_{\mathrm{DECL}} \cup A_{\mathrm{EXPR}} \cup A_{\mathrm{RET}} \cup A_{\mathrm{BLOCK}} \cup A_{\mathrm{FUN}} \cup A_{\mathrm{ITE}}$ . The declaration automaton $A_{\mathrm{DECL}} \coloneqq A_{\mathrm{let} x: T}$ ; captures undefined variable names $x$ by accepting all strings, except for existing identifiers. This automaton is a prefix automaton since an accepting state can always be reached by appending characters to the declared identifier. The return statement automaton is $A_{\emptyset}$ when outside a function and restricts the parsed expression to the return type of the surrounding function otherwise. The remaining automata are mainly concatenations of previously defined automata and recursive invocations of $A_{s}$ , with small variations detailed in Appendix A.5. + +Tracking Type Environments. Generally, we follow the typing rules in Figure 6. Identifiers are passed on through all state transitions, matching the rule SEQ, where the type environment of consecutive statements needs to be compatible. However, in the cases of BLOCK, ITE and FUN, we discard the local type environment after parsing, matching the respective typing rules. In FUN additionally, the function signature and parameters are added into the type environment of the function body automaton, and the function signature in the environment of subsequent statements. + +Guaranteeing Return Types. When parsing the body of a function, the transition function of the function automata $A_{\mathrm{FUN}}$ maintains information about the declared return type and the encountered return statements (if any). 
$A_{\mathrm{FUN}}$ only accepts states where all return values match the declared return type and all execution paths inside the function body return, following $L_B$ 's typing rules in Figure 7. If the current generated statements do not return in all execution paths, another statement is forced to be generated. Since we can always express the requested type through literals, a correct return statement can always be generated and the prefix automaton property is not violated. + +The described rules are straightforward to implement without violating the prefix property as all restrictions are derived only from already parsed input, e.g., the already defined identifiers or the previously declared function return type. We can therefore deduce that the statement automaton is a prefix automaton. Moreover, the automaton accepts all valid statements of $L_{B}$ , with the exception of well-typed expressions rejected by $A_{e}$ . Therefore the parsed language is a subset of $L_{B}$ . + +LEMMA 6. With $A_M \coloneqq A_{\overline{s}}$ it holds that $A_M$ is a prefix automaton and $L(A_M) \subseteq L_B$ . + +# 4 Extension to TypeScript + +We extend our completion engine described in §3 to handle a core subset of modern TypeScript. In this section, we selectively discuss the implementation of several interesting TypeScript features. We provide a comprehensive list of supported and unsupported TypeScript features in Appendix B. + +Constant Variable Declarations. In addition to variable declaration using let, TypeScript supports constant declarations using const. This defines immutable identifiers. We thus additionally track mutability of each identifier in the type environment and disallow applying the assignment operator to immutable identifiers. + +Arrays. We add support for array type annotation, parsing array expressions, and reading from and assigning to array fields. In array expressions, we enforce that all array elements have the same type. 
Moreover, array types introduce another dimension of type nesting. Therefore we adapt the type reachability pruning heuristic to handle this additional dimension to ensure termination. + +**Loops.** TypeScript supports various loop constructs, including for, while, do-while, and for...of loops. These are implemented mostly as variations of the statement block parser. The for...of loop uniquely constrains the right-hand side of the of operator to an array of any type. To adapt the type search, we introduce a generic array type $\bullet[]$ , which matches any array type. For example, both types number[] and string[] match $\bullet[]$ in Line 2 of Algorithm 2. + +Additional Operators and Types. We add several arithmetic and logic operators, such as modulo $\%$ , exact equality $===$ , logical or $||$ , and the ternary operator $\text{?:}$ . To handle these operators, we add additional edges to the type search graph. Moreover, we add support for post- and prefix operators such as -- and ++, which are only valid extensions to mutable expressions. + +Operator Precedence. TypeScript defines an operator precedence, which determines the implicit grouping of expressions. For example $1 + 2$.toString() is parsed as $1 + (2$.toString()). We adapt our expression parsing algorithm in two places to handle operator precedences. First, in the expression automaton, we leverage the knowledge about previously parsed extensions to determine the implicit grouping and thus where the next operator is applied. For example, for state $1 + 2$ , the member access extension $.n$ is applied to $2$ , as opposed to $1 + 2$ . Second, we adapt the type search in Algorithm 2. Concretely, we ensure that only extensions that can be validly applied based on operator precedence are iterated over.
For this, we track the operator precedence of previously parsed extensions and extensions considered during the traversal of the type graph and omit operators in Line 5 that violate operator precedence. + +Global Identifiers and Imports. In TypeScript, many identifiers are defined globally and available in any execution. These global identifiers are incorporated by initializing the type environment of the program automaton accordingly. Identifiers such as Math introduce additional types, which we additionally implement. We also model the import of the crypto library using require. + +Polymorphic Built-In Members. The TypeScript LOOKUP implementation defines a few polymorphic members for built-in types. For example, for array $x$ of type $T[]$ , $x$.map(f) takes a callback function $f$ and returns a new array $[f(x[0]), f(x[1]), \ldots]$ . If $f$ has type $(T) => P$ , the returned array has type $P[]$ . Here $P$ is a type parameter, which is instantiated by matching the type of the passed function to the type pattern. + +We support such polymorphisms by adapting the type search. We track type patterns and enforce that type parameters are instantiated before the goal type is reached. We then continue the search from the instantiated version. In the map example, when searching completions of x.map, we first search for functions that instantiate the type parameter, and then continue the search from the instantiated type. When anonymous functions are generated as call parameters, we enforce that the function matches the searched type pattern. + +Type Annotations. TypeScript is designed to be flexible, allowing many type annotations to be omitted when they can be automatically inferred. We generally support this, such as inferring types from initial values. However, it can lead to unexpected types when annotations are omitted, often confusing even experienced developers [47, 48].
Moreover, in the context of LLM-based code generation, having more type annotations can provide valuable information for both the model and our type-constraining algorithms. We have identified three situations where generated code often fails to compile without type annotations, prompting us to enforce them. First, we require + +annotations for all function parameters and return types. Second, all variable declarations must either have a type annotation or be initialized with a value. Third, we enforce type annotations for the first parameter of anonymous functions used as callbacks in the polymorphic built-in member reduce. These constraints trade off practical correctness with theoretical language completeness. + +# 5 Experimental Evaluation + +We present an extensive evaluation of our type constraining approach on a variety of tasks and models. We outline our experimental setup (§5.1), evaluate the impact on compilation errors and functional correctness (§5.2), perform runtime analysis (§5.3), and present case studies (§5.4). + +# 5.1 Experimental Setup + +We now outline our main evaluation setup, covering implementation, evaluated tasks, considered models, compared methods, and metrics. We provide further setup details and hyperparameter choices in Appendix B. + +Implementation. Our implementation is written in Python and contains 11249 lines of code. To ensure robust implementation, we built a large set of around four hundred unit tests and frequently compared the behaviors of our implementation with the official TypeScript compiler [42]. + +Tasks and Benchmarks. We evaluate three relevant tasks of code generation: + +- Synthesis: Given a natural language task description and a function header, the task is to generate a solution from scratch. +- Translation: Given a function written in Python and the header of an equivalent TypeScript function, the task is to generate the body of the equivalent function in TypeScript. 
+- Repair: Given a natural language task description, a non-compilable solution, the corresponding compiler error, and the function header, the task is to restore functionality of the flawed solution by resolving the compilation error. + +The benchmarks for these tasks are based on TypeScript-translated tasks from HumanEval [12] and MBPP [5], contained in the MultiPL-E dataset [13], with 159 and 384 instances each. We observe that success in generating valid code for the same sample can vary depending on the random seed used. To obtain more comprehensive results on the small HumanEval dataset, we generate each sample 4 times with different seeds and aggregate the outcomes. In MBPP, we generate each sample once. For Repair, we collect all non-compiling programs from the unconstrained synthesis task for all models, resulting in 292 and 248 instances for HumanEval and MBPP each. + +Models. We use 6 different open-weight LLMs, covering 3 LLMs of varying parameter sizes from the same model family and 4 models of a similar size from different model families: the Gemma 2 model family with 2B/9B/27B parameters [64], DeepSeekCoder 33B (abbreviated as DSCoder 33B) [28], CodeLlama 34B [59], and Qwen2.5 32B [73]. For all evaluated LLMs we choose the instruction-tuned variants, which are fine-tuned to follow instructions in a chat-style interaction, such that they adequately attempt to resolve the presented tasks. + +Compared Methods. We run unconstrained LLM sampling, reported as Vanilla. We measure the upper bound improvement of prior syntactic constraining methods [8, 57, 66] by assuming that all syntactically incorrect instances generated by Vanilla could be compiled under syntactic constraining. We refer to this improvement as idealized Syntax. We separately sample using type-constrained decoding based on our completion engine introduced in §3 and §4, and report it as Types. 
Due to the size and complexity of the full TypeScript compiler, featuring over 427,105 lines of code in 698 files [42], our extension does not cover all features of TypeScript. We therefore + +Table 2. Number of instances with compiler errors in unconstrained generation (Vanilla), idealized syntax-only constraining (Syntax), and our proposed type constraining (Types). Type constraining reduces compiler errors by $74.8\%$ and $56.0\%$ in the synthesis of HumanEval and MBPP problems respectively, compared to only $9.0\%$ and $4.8\%$ ideal improvement on the two datasets respectively through syntax-only constraining. + +
ModelSynthesisTranslationRepair
VanillaSyntaxTypesVanillaSyntaxTypesVanillaSyntaxTypes
HumanEvalGemma 2 2B10392↓10.7%44↓57.3%177149↓15.8%80↓54.8%194181↓6.7%103↓46.9%
Gemma 2 9B4541↓8.9%13↓71.1%7563↓16.0%16↓78.7%113108↓4.4%52↓54.0%
Gemma 2 27B1513↓13.3%2↓86.7%2020↓0.0%3↓85.0%4540↓11.1%22↓51.1%
DS Coder 33B2625↓3.8%5↓80.8%1817↓5.6%7↓61.1%3636↓0.0%15↓58.3%
CodeLlama 34B8671↓17.4%28↓67.4%158124↓21.5%59↓62.7%153142↓7.2%48↓68.6%
Qwen2.5 32B1717↓0.0%2↓88.2%2421↓12.5%5↓79.2%3634↓5.6%13↓63.9%
MBPPGemma 2 2B6764↓4.5%27↓59.7%126111↓11.9%79↓37.3%194184↓5.2%108↓44.3%
Gemma 2 9B3029↓3.3%10↓66.7%6761↓9.0%33↓50.7%129124↓3.9%63↓51.2%
Gemma 2 27B2019↓5.0%7↓65.0%3736↓2.7%22↓40.5%7169↓2.8%32↓54.9%
DS Coder 33B3232↓0.0%19↓40.6%2927↓6.9%13↓55.2%9090↓0.0%43↓52.2%
CodeLlama 34B8071↓11.2%41↓48.8%126114↓9.5%54↓57.1%157148↓5.7%76↓51.6%
Qwen2.5 32B1918↓5.3%13↓31.6%2222↓0.0%16↓27.3%5552↓5.5%29↓47.3%
+ +emulate a type constraining that supports the entire TypeScript feature set. Concretely, if a sample compiles correctly without any constraining, we report it as-is. Otherwise, we report the result of a constrained resample. For all methods, if generation takes more than 300 seconds, we report the partial program generated until the timeout. + +Metrics. We compute two main metrics to assess the effectiveness of the compared methods. First, we determine the number of compiler errors in model-generated outputs. We count as a compiler error any case in which the TypeScript compiler [42] reports an issue during compilation. To measure functional correctness, we leverage the pass@1 metric [14], which measures the percentage of code generations that pass the provided unit tests given only one trial. + +# 5.2 Results on Compilation and Functional Correctness + +In this section, we present our experimental results, showing that on all three code-generation-related tasks, our type constraining approach significantly improves the considered LLMs in generating both compileable and functionally correct code. It also substantially outperforms syntax-only constraining. + +Reduction of Compilation Errors. In Table 2, we present the number of compilation errors produced by each compared method. For synthesis and translation, in the unconstrained setting (Vanilla), on average only $9.0\%$ and $4.9\%$ of the non-compiling instances in HumanEval and MBPP respectively are due to syntactic errors (Syntax), with Qwen2.5 32B even making no syntax errors at all for HumanEval synthesis and MBPP translation. In contrast, type constraining reduces compilation errors by more than half, i.e., by $75.3\%$ and $52.1\%$ on HumanEval and MBPP respectively. We observe that models across all sizes and families benefit similarly from our constraining, with a minimum error reduction of $54.8\%$ and $27.3\%$ on HumanEval and MBPP respectively, highlighting the general effectiveness of our approach. 
+ +A straightforward way to improve successful compilation of LLM-generated code is to feed the erroneous code and the error message back to an LLM for correction – our repair task. Thanks + +Table 3. pass@1 of unconstrained generation (Vanilla) and type constraining (Types). The benefit of our type-constraining approach transfers from reduced compilation errors to improved functional correctness. + +
ModelSynthesisTranslationRepair
VanillaTypesVanillaTypesVanillaTypes
HumanEvalGemma 2 2B29.130.2↑3.8%50.253.9↑7.5%11.620.9↑79.4%
Gemma 2 9B56.658.3↑3.1%73.778.3↑6.2%24.034.9↑45.7%
Gemma 2 27B69.571.2↑2.5%86.687.7↑1.3%38.441.1↑7.1%
DS Coder 33B68.971.1↑3.2%88.790.1↑1.6%47.650.7↑6.5%
CodeLlama 34B41.043.4↑5.7%58.663.5↑8.3%17.527.4↑56.9%
Qwen2.5 32B79.681.8↑2.8%92.193.9↑1.9%65.471.2↑8.9%
MBPPGemma 2 2B40.442.4↑5.2%52.356.0↑7.0%12.122.6↑86.7%
Gemma 2 9B65.467.4↑3.2%71.475.8↑6.2%24.231.9↑31.7%
Gemma 2 27B70.672.1↑2.2%83.184.4↑1.6%39.145.2↑15.5%
DS Coder 33B65.467.2↑2.8%85.989.1↑3.6%35.143.1↑23.0%
CodeLlama 34B42.245.6↑8.0%55.763.3↑13.6%15.726.6↑69.2%
Qwen2.5 32B76.376.6↑0.3%89.690.4↑0.9%48.054.0↑12.6%
+ +to its general applicability, our type constraining approach can also enhance this process. Our experimental results in the setting of code repair are also depicted in Table 2. We find that, in the vanilla setting, many models struggle to correctly localize and resolve compilation errors, with Gemma 2 2B for example repairing only $33.5\%$ and $25.8\%$ of the non-compiling HumanEval and MBPP instances, respectively. This is substantially increased to $56.4\%$ and $58.4\%$ through type constraining. On average, using type-constrained sampling, $53.7\%$ more compilation errors are resolved than using vanilla LLM decoding. + +Improving Functional Correctness. Programs that do not compile are always functionally incorrect. With our type constraining method, non-compilable generations can be turned into well-formed ones, offering the possibility of achieving functional correctness. In Table 3, we experimentally show that type constraining universally improves the functional correctness of LLM-generated code. On the three tasks considered, employing type constraining improves LLMs' pass@1 rate, achieving an average increase by $3.5\%$ in synthesis, $5.0\%$ in translation, and $37.0\%$ in repair tasks. The larger improvement in the latter is due to vanilla LLMs generally struggling to generate functionally correct code. One interesting phenomenon is that, for stronger models, + +constraints more likely lead to recovering functionally correct code. For example on the synthesis task, for Gemma 2 27B, out of the 26 instances that required resampling to compile successfully, 17 are also functionally correct. For Qwen2.5 32B, 15 out of 21 such instances were correct. + +# 5.3 Runtime Analysis + +As discussed in §2, compared with vanilla LLM decoding, our constrained decoding algorithm runs an additional loop (Line 4 of Algorithm 1), where tokens are sampled from an LLM-produced next-token probability distribu + +tion and checked against the completion engine. 
In this section, we investigate how this process introduces additional runtime overhead for our type constraining. Note that for each selected token, + +Table 4. Median time per synthesis instance in seconds spent by our type-constrained decoding and its relative increase compared with unconstrained decoding (Vanilla). + +
ModelHumanEvalMBPP
Gemma 2 2B6.7↑38.3%6.3↑35.4%
Gemma 2 9B8.3↑29.2%9.5↑46.8%
Gemma 2 27B11.7↑19.9%11.7↑32.8%
DS Coder 33B11.5↑36.2%9.4↑59.5%
CodeLlama 34B7.6↑40.8%7.0↑37.6%
Qwen2.5 32B7.3↑39.6%4.9↑54.8%
+ +vanilla and constrained decoding both run LLM inference only once, meaning that there is no extra overhead from LLM inference in constrained decoding. + +Overhead of Type Constraining. For an application of our method in practice, the effective runtime increase due to constrained decoding is highly relevant. To assess it, we measure the runtime per synthesis instance in HumanEval and MBPP for both unconstrained and type-constrained decoding. We report the median runtime per instance for type constraining and its relative increase to unconstrained decoding in Table 4. On average over the evaluated models, we observe a relative increase of $39.1\%$ and $52.1\%$ in HumanEval and MBPP respectively. We consider this impact to be bearable for the observed significant decrease in compilation errors. Moreover, this is measured on an unoptimized, Python-based implementation and could be significantly improved by a more system-oriented implementation, such as the one proposed by Dong et al. [18]. + +Number of Sample-and-Check Loop Iterations. To provide an in-depth analysis of the overhead of our type constraining method, we measure the number of iterations spent by the sample-and-check loop to find an admissible token. The results are provided in Figure 8. We observe that the number of loop iterations follows a long-tail distribution. For $99.4\%$ of cases, only one loop iteration is needed. This number is even higher for stronger models, with Gemma 2 9B and 27B requiring one iteration in $99.6\%$ and $99.9\%$ of cases, respectively. This means that, in most instances, LLMs can generate a valid token on the first attempt, which is then verified by the completion engine. In cases where more than one iteration is needed, the completion engine intervenes to guide the selection + +![](images/847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg) +Figure 8. 
Histogram on the number of iterations consumed by the sample-and-check loop at Line 4 of Algorithm 1 to find a valid token, measured with Gemma 2 2B for HumanEval synthesis. + +of valid tokens. These interventions help resolve errors in many instances in our benchmarks, providing significant benefit, as discussed in §5.2. + +Prior work [8, 57, 66] implemented constrained decoding differently than Algorithm 1. Instead of running the sample-and-check loop, they execute the completion engine for all tokens in the LLM's vocabulary, mask out all invalid tokens, and sample once from the remaining valid tokens based on their normalized likelihoods. This implementation is less efficient than ours, especially when calling the completion engine is costly. Based on the long-tail distribution depicted in Figure 8, our implementation requires only a single invocation to the completion engine for most cases, whereas their implementation calls it for every token in the LLM's vocabulary for all cases. In our experiments, their implementation results in timeouts for all benchmark instances, with a timeout of 5 minutes per instance. This justifies our choice of using the sample-and-check loop. + +# 5.4 Case Study + +For a qualitative evaluation, we manually inspect instances where unconstrained decoding fails and our type constraining approach successfully corrects errors. We find that our technique effectively amends various types of compilation errors. We showcase three such examples in Figure 9. + +Wrong Parameter Count. In Figure 9a, the task is to find all words in a string that contain a number of consonants. When Gemma 2 2B attempts to solve this problem, translating from the Python version to TypeScript, it calls split without arguments. This is allowed in Python, but not in TypeScript, where a single string argument is required. Type constraining correctly detects this and guides the generation to sample a space character as parameter. + +
VanillaTypes
(a)// find all words with consonants function select_words(s: string): string[] { for (const word of s.split()// find all words with consonants function select_words(s: string): string[] { for (const word of s.split(' ')
(b)function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } // No return }function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } return 1; }
(c)// insert a delimiter between every element return nums.reduce( (acc, curr, index) => { acc.push(curr); return acc; }, [])// insert a delimiter between every element return nums.reduce( (acc: number[], curr, index) => { acc.push(curr); return acc; }, [])
+ +Figure 9. Three examples illustrating the effect of type-constrained sampling. Left are unconstrained generations with problematic tokens highlighted in red, and right are type-constrained results with corrected tokens highlighted in green, adapted for clarity. In (a), Gemma 2 2B attempts to call split, missing required arguments. In (b), DeepSeekCoder 33B attempts to complete a function without a guaranteed return. The issue is resolved by forcing generation of another statement after the main loop. In (c), Gemma 2 9B calls reduce with an anonymous function without type annotation. This leads to an incorrect type inference for the first parameter. The issue is solved by guiding the model to add type annotation. + +Missing Return Statement. In Figure 9b, to complete function largest_divisor, the model must compute a straightforward divisor loop. DeepSeekCoder 33B Instruct [28] implements a correct loop, but does not guarantee returning a value in every execution path. When the return statement in the loop is never executed, e.g., for negative inputs, the function thus returns undefined, violating the type rules. Our method detects this issue and forces the generation of another statement in the function body, resulting in a correct fallback return statement. + +Incorrect Type Inference. In Figure 9c, the task is to insert a delimiter between every element in an array. Gemma 2 9B solves this with the reduce function. This generic function accepts two arguments; first, a callback function that is called consecutively for every element in the array and accumulates a result, second, an initial value for the callback function. The type of the accumulator of the callback is derived implicitly from the second argument, which is an empty array in the given example. TypeScript infers special type never[] for the empty array, disallowing inserting curr of type number through push. Therefore, the program fails to compile. 
This issue is a well-known limitation of the TypeScript compiler, often confusing even expert developers [47, 48]. Our method resolves it by enforcing adequate type annotation on the first argument of the callback function. + +# 6 Discussion + +Our general type constraining approach, backed by strong experimental results, opens exciting avenues for future research, which we discuss below. + +Implementation Effort. Developing a completion engine for a target programming language currently requires manual efforts. However, we expect that the involved effort to adopt our method to other languages will be reduced significantly, as many features transfer from our implementation for $L_{B}$ and TypeScript. Moreover, we believe that, due to the huge impact on LLM's code generation, the effort will pay off. Future programming language developers may consider generally writing + +```javascript
function sort_threel(number[],r:number[]):number[]{
for(let $\mathrm{i} = 0$ ;i<1.length; $\mathrm{i + + }$ ){
r.push(l[i].toString().slice(0,3).concat(l[i].ToString().split())'.split').reverse() .join(')).split''.reverse().join('').ToString() $^+$ l[i].ToString().slice(3).split')…
``` + +Figure 10. Compilation errors remain when the model does not terminate after a corrected token. In this example for synthesis on the HumanEval task #33, CodeLlama 34B is steered away from accessing non-existing member .sort and instead accesses .toString.
However, recent findings showed that currently, even the strongest LLMs struggle with generating compilable code for more complex coding tasks, stricter typing rules, and low-resource languages (e.g., new DSLs). Gusanidas [29] evaluated various state-of-the-art LLMs on difficult code synthesis tasks in Rust, reporting compilation error rates of $18\%$ for OpenAI o1-mini [33], $39\%$ for DeepSeek R1 [15] and $27\%$ for Anthropic's Claude 3.5 Sonnet [2]. For OCaml and Haskell, which are sparsely represented in LLMs' training data, the error rate is even higher at $40\% - 60\%$ for all models, matching a trend of worse performance on low-resource languages [24, 36]. Pan et al. [54] compiled a large dataset of code translation and found $44.3\%$ of GPT-4-generated code to contain compilation errors. Similarly, Shetty et al. [61] report around $25\%$ compilation errors for C-to-Rust translation using OpenAI o1 models. Our type constraining approach is broadly applicable to all these scenarios and our work presents a promising proof of concept. Future work can consider building upon our approach to address these challenges. + +Constrained decoding in general requires access to the next-token probability distributions produced by LLMs. Currently, commercially available black-box LLM APIs only return sampled tokens and do not offer complete next-token distributions. A possible solution is to integrate our method into the backend of model providers, as was recently implemented for guaranteeing adherence to JSON Schemas [3, 50]. + +Remaining Compiler Errors. We observe that, even though constrained decoding guarantees a valid result upon termination, a considerable amount of compilation errors remain due to non-termination within the token or time limit. We find this to be caused by generation loops, entered when generation is amended by constraints and the LLM is unable to recover. 
An example is depicted in Figure 10, where CodeLlama 34B tries to access the invalid member sort on an expression of type number. Future work may add additional constraints to force stopping such unconstructive loops and steer the model more strictly, e.g., by limiting the complexity of generated expressions. + +# 7 Related Work + +Code Language Models. Recently, LLMs have gained traction for diverse coding tasks such as code synthesis, repair, or translation [35]. These models are typically trained on datasets containing billions to trillions of tokens and have billions of parameters, with both factors contributing to improved performance in code-related benchmarks [28, 46, 59, 64]. Meanwhile, LLMs are well known to frequently make mistakes [32, 58], and, as we show in this work, even state-of-the-art open-weight models with over 30 billion parameters frequently make errors in code generation. + +Improving Language Model Accuracy. Apart from constrained decoding, three primary approaches have been proposed to enhance the accuracy of language models on code tasks: fine-tuning, retrieval augmentation (RAG), and compiler or execution feedback. Fine-tuning adapts the model weights based on specifically collected training data. This process is highly resource intensive [65, 70]. RAG provides the model with additional context based on a database or related code snippets [6, 57]. Compiler and execution feedback is only available after completing the model generation and requires resampling [16, 34, 69]. However, constrained decoding is orthogonal to these methods and, as indicated by Poesia et al. [57] and our experimental results, combining constrained decoding with RAG or compiler feedback additionally improves model performance. + +Constrained Decoding. Prior work on constrained decoding failed to achieve strong results due to its limitation to syntactic language features. 
Constraining to context-free languages has been explored extensively in recent work [7, 8, 57, 71]. Simple context-sensitive syntactic features, such as the space indentation in Python and the scope markers in Go have also been implemented [41, 66]. As demonstrated in §5, however, syntax errors on average account for only $6\%$ of compilation errors in recent code models. The rarity of syntax errors significantly reduces the potential of leveraging them for improvements in code correctness. Meanwhile, our type-constrained decoding more than halved compilation errors. + +Type Systems for Code Synthesis. Previous work that leveraged type systems for code synthesis was confined to specialized settings and unable to constrain general, complex program generation. Poesia et al. [57] proposed using known column names to guide SQL query generation. Gvero et al. [30] employed a search on the type graph for function call completion. Agrawal et al. [1] leverage language-server-generated type annotations for object member accesses. Blinn et al. [11] use language-server-derived type information to provide additional context to the LLM, but not to enforce hard constraints. Additionally, type constraints have been used to direct code synthesis based on specialized search procedures [22, 56, 69]. However, these methods are not compatible with LLM-based code generation. This limits their ability to exploit the powerful natural language and general-purpose capabilities of LLMs. + +# 8 Conclusion + +In this work, we explored how type systems in programming languages can be used to guide language models during decoding. Concretely, we design and implement prefix automata to perform type constraining for a foundational simply typed language and then extend it to the popular language TypeScript. 
We extensively evaluate the impact of using such constraints for code synthesis, translation, and repair and observe that we more than halve compilation errors on a diverse set of models and consistently increase functional correctness. We further explore qualitatively how the constraining positively impacts code generation. We conclude that such type constraining should be implemented for more programming languages, and has the potential to generally improve code generation in many domains. + +# Artifact Availability + +The artifact for this paper, including source code, datasets, and reproductions scripts, is available on GitHub (https://github.com/eth-sri/type-constrained-code-generation) and Zenodo [45]. + +# Acknowledgements + +We would like to thank the anonymous reviewers for their in-depth and constructive feedback, and the artifact reviewers for their feedback on our artifact accessibility. + +# References + +[1] Lakshya Agrawal, Aditya Kanade, Navin Goyal, Shuvendu K Lahiri, and Sriram Rajamani. 2023. Monitor-Guided Decoding of Code LMs with Static Analysis of Repository Context. In NeurIPS. https://openreview.net/forum?id=qPUbKxKvXq +[2] Anthropic. [n.d.]. Claude 3 Model Card. https://assets.anthropic.com/m/61e7d27f8c8f5919/original/Claude-3-ModelCard.pdf Accessed: March 10, 2025. +[3] Anthropic. 2025. JSON Mode. https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode Accessed: March 10, 2025. +[4] Ken Arnold and James Gosling. 1996. The Java Programming Language. +[5] Jacob Austin, Augustus Odena, Maxwell I. Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie J. Cai, Michael Terry, Quoc V. Le, et al. 2021. Program Synthesis with Large Language Models. arXiv Preprint (2021). https://arxiv.org/abs/2108.07732 +[6] Nastaran Bassamzadeh and Chhaya Methani. 2024. A Comparative Study of DSL Code Generation: Fine-Tuning vs. Optimized Retrieval Augmentation. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2407.02742 +[7] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2023. Prompting Is Programming: A Query Language for Large Language Models. PLDI (2023). https://doi.org/10.1145/3591300 +[8] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2024. Guiding LLMs The Right Way: Fast, Non-Invasive Constrained Generation. In ICML. https://openreview.net/forum?id=pXaEYzrFae +[9] Satwik Bhattamishra, Kabir Ahuja, and Navin Goyal. 2020. On the Ability and Limitations of Transformers to Recognize Formal Languages. In EMNLP. https://doi.org/10.18653/v1/2020.emnlp-main.576 +[10] Gavin M. Bierman, Martin Abadi, and Mads Torgersen. 2014. Understanding TypeScript. In ECOOP. +[11] Andrew Blinn, Xiang Li, June Hyung Kim, and Cyrus Omar. 2024. Statically Contextualizing Large Language Models with Typed Holes. OOPSLA (2024). https://doi.org/10.1145/3689728 +[12] Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language Models are Few-Shot Learners. In NeurIPS. https://proceedings.neurips.cc/paper/2020/hash/1457c0d6bfcb4967418bf8ac142f64a-Abstract.html +[13] Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q. Feldman, et al. 2023. MultiPL-E: A Scalable and Polyglot Approach to Benchmarking Neural Code Generation. IEEE Trans. Software Eng. (2023). +[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Pondé de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating Large Language Models Trained on Code. arXiv Preprint (2021). https://arxiv.org/abs/2107.03374 +[15] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, et al. 2025.
DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.12948 +[16] Pantazis Deligiannis, Akash Lal, Nikita Mehrotra, Rishi Poddar, and Aseem Rastogi. 2025. RustAssistant: Using LLMs to Fix Compilation Errors in Rust Code. In ICSE. https://www.microsoft.com/en-us/research/publication/rustassistant-using-llms-to-fix-compiler-errors-in-rust-code/ +[17] TypeScript Developers. [n.d.]. TypeScript: Documentation – More on Functions. https://www.typescriptlang.org/docs/handbook/2/functions.html#function-type-expressions Accessed: March 10, 2025. +[18] Yixin Dong, Charlie F. Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. 2024. XGrammar: Flexible and Efficient Structured Generation Engine for Large Language Models. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2411.15100 +[19] Alan AA Donovan and Brian W Kernighan. 2015. The Go programming language. +[20] Shihan Dou, Haoxiang Jia, Shenxi Wu, Huiyuan Zheng, Weikang Zhou, Muling Wu, Mingxu Chai, Jessica Fan, Caishuang Huang, Yunbo Tao, et al. 2024. What's Wrong with Your Code Generated by Large Language Models? An Extensive Study. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2407.06153 + +[21] Javid Ebrahimi, Dhruv Gelda, and Wei Zhang. 2020. How Can Self-Attention Networks Recognize Dyck-n Languages?. In EMNLP. https://aclanthology.org/2020-findings-emnlp.384/ +[22] Jonás Fiala, Shachar Itzhaky, Peter Müller, Nadia Polikarpova, and Ilya Sergey. 2023. Leveraging Rust Types for Program Synthesis. PLDI (2023). https://doi.org/10.1145/3591278 +[23] Zheng Gao, Christian Bird, and Earl T. Barr. 2017. To type or not to type: quantifying detectable bugs in JavaScript. In ICSE. https://doi.org/10.1109/ICSE.2017.75 +[24] Alessandro Giagnorio, Alberto Martin-Lopez, and Gabriele Bavota. 2025. Enhancing Code Generation for Low-Resource Languages: No Silver Bullet. arXiv Preprint (2025). 
https://doi.org/10.48550/arXiv.2501.19085 +[25] GitHub. [n.d.]. https://github.com/features/copilot +[26] GitHub. 2022. The top programming languages. https://octoverse.github.com/2022/top-programming-languages +[27] Aaron Grattaflori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The Llama 3 Herd of Models. ArXiv Preprint (2024). https://arxiv.org/abs/2407.21783 +[28] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Y. Wu, Y. K. Li, et al. 2024. DeepSeek-Coder: When the Large Language Model Meets Programming - The Rise of Code Intelligence. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2401.14196 +[29] Gusanidas. [n.d.]. Compilation Benchmark. https://github.com/Gusanidas/compilation-benchmark Accessed: March 10, 2025. +[30] Tihomir Gvero, Viktor Kuncak, Ivan Kuraj, and Ruzica Piskac. 2013. Complete completion using types and weights. In PLDI. https://doi.org/10.1145/2491956.2462192 +[31] John E. Hopcroft and Jeffrey D. Ullman. 1979. Introduction to Automata Theory, Languages and Computation. +[32] Lei Huang, Wejiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2023. A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2311.05232 +[33] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. OpenAI o1 System Card. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.16720 +[34] Prithwish Jana, Piyush Jha, Haoyang Ju, Gautham Kishore, Aryan Mahajan, and Vijay Ganesh. 2024. CoTran: An LLM-Based Code Translator Using Reinforcement Learning with Feedback from Compiler and Symbolic Execution. 
In ECAI (Frontiers in Artificial Intelligence and Applications). https://doi.org/10.3233/FAIA240968 +[35] Juyong Jiang, Fan Wang, Jiasi Shen, Sungju Kim, and Sunghun Kim. 2024. A Survey on Large Language Models for Code Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.00515 +[36] Sathvik Joel, Jie JW Wu, and Fatemeh H. Fard. 2024. Survey on Code Generation for Low resource and Domain Specific Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2410.03981 +[37] Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi, Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, et al. 2024. StarCoder 2 and The Stack v2: The Next Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2402.19173 +[38] Madnight. 2024. GitHub 2.0. https://madnight.github.io/git/#/pull_requestes/2024/1 +[39] Harry G. Mairson. 2004. Linear lambda calculus and PTIME-completeness. J. Funct. Program. (2004). https://doi.org/10.1017/S0956796804005131 +[40] Nicholas D Matsakis and Felix S Klock. 2014. The rust language. ACM SIGAda Ada Letters (2014). +[41] Daniel Melcer, Nathan Fulton, Sanjay Krishna Gouda, and Haifeng Qian. 2024. Constrained Decoding for Fill-in-the-Middle Code Language Models via Efficient Left and Right Quotienting of Context-Sensitive Grammars. (2024). https://arxiv.org/abs/2402.17988 +[42] Microsoft. 2024. TypeScript. https://github.com/microsoft/TypeScript. Accessed on November 9, 2024, commit #ef802b1. +[43] John C. MITCHELL. 1990. Type Systems for Programming Languages. In Formal Models and Semantics. https://www.sciencedirect.com/science/article/pii/B9780444880741500135 +[44] Niklas Muennighoff, Qian Liu, Armel Randy Zebaze, Qinkai Zheng, Binyuan Hui, Terry Yue Zhuo, Swayam Singh, Xiangru Tang, Leandro von Werra, and Shayne Longpre. 2024. OctoPack: Instruction Tuning Code Large Language Models. In ICLR. 
https://openreview.net/forum?id=mw1PWNSWZP +[45] Niels Mündler, Jingxuan He, Hao Wang, Koushik Sen, Dawn Song, and Martin Vechev. 2025. Reproduction Package for "Type-Constrained Code Generation with Language Models". doi:10.5281/zenodo.15355889 +[46] Niels Mündler, Mark Niklas Müller, Jingxuan He, and Martin Vechev. 2024. SWT-Bench: Testing and Validating Real-World Bug-Fixes with Code Agents. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/bit/94f093b41fc2666376fb1f667fe282f3-AbsAbstract-Conference.html + +[47] nielstron. 2024. Incorrect type deducted for accumulator in reduce. https://github.com/microsoft/TypeScript/issues/59999. +[48] nop33. 2024. Wrong inferred initial value in reduce. https://github.com/microsoft/TypeScript/issues/59863. +[49] OpenAI. 2023. GPT-4 Technical Report. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2303.08774 +[50] OpenAI. 2025. Structured Outputs. https://platform.openai.com/docs/guides/structured-outputs Accessed: March 10, 2025. +[51] Gabriel Orlanski, Kefan Xiao, Xavier Garcia, Jeffrey Hui, Joshua Howland, Jonathan Malmaud, Jacob Austin, Rishabh Singh, and Michele Catasta. 2023. Measuring the Impact of Programming Language Distribution. In ICML. https://proceedings.mlr.press/v202/orlanski23a.html +[52] oxc project. 2024. oxc - The Javascript Oxidation Compiler. https://github.com/oxc-project/oxc. +[53] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226 +[54] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. 
https://doi.org/10.1145/3597503.3639226 +[55] Hammond Pearce, Baleegh Ahmad, Benjamin Tan, Brendan Dolan-Gavitt, and Ramesh Karri. 2022. Asleep at the Keyboard? Assessing the Security of GitHub Copilot's Code Contributions. In S&P. https://doi.org/10.1109/SP46214.2022.9833571 +[56] Daniel Perelman, Sumit Gulwani, Thomas Ball, and Dan Grossman. 2012. Type-directed completion of partial expressions. In PLDI. https://doi.org/10.1145/2254064.2254098 +[57] Gabriel Poesia, Alex Polozov, Vu Le, Ashish Tiwari, Gustavo Soares, Christopher Meek, and Sumit Gulwani. 2022. Synchronesh: Reliable Code Generation from Pre-trained Language Models. In ICLR. https://openreview.net/forum?id=KmtVD97J43e +[58] Vipula Rawte, Amit P. Sheth, and Amitava Das. 2023. A Survey of Hallucination in Large Foundation Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2309.05922 +[59] Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, et al. 2023. Code Llama: Open Foundation Models for Code. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.12950 +[60] Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In ACL. https://doi.org/10.18653/v1/p16-1162 +[61] Manish Shetty, Naman Jain, Adwait Godbole, Sanjit A. Seshia, and Koushik Sen. 2024. Syzygy: Dual Code-Test C to (safe) Rust Translation using LLMs and Dynamic Analysis. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.14234 +[62] Vince Szabo, Dominik Winterer, and Zhendong Su. 2024. Compilation Quotient (CQ): A Metric for the Compilation Hardness of Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.04778 +[63] Florian Tambon, Arghavan Moradi Dakhel, Amin Nikanjam, Foutse Khomh, Michel C. Desmarais, and Giuliano Antoniol. 2025. Bugs in large language models generated code: an empirical study. Empir. Softw. Eng. (2025). 
https://doi.org/10.1007/s10664-025-10614-4 +[64] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. 2024. Gemma 2: Improving Open Language Models at a Practical Size. arXiv Preprint (2024). https://arxiv.org/abs/2408.00118 +[65] Yun-Da Tsai, Mingjie Liu, and Haoxing Ren. 2024. Code Less, Align More: Efficient LLM Fine-tuning for Code Generation with Data Pruning. (2024). https://doi.org/10.48550/arXiv.2407.05040 +[66] Shubham Ugare, Tarun Suresh, Hangoo Kang, Sasa Misailovic, and Gagandeep Singh. 2024. SynCode: LLM Generation with Grammar Augmentation. ArXiv Preprint (2024). https://arxiv.org/abs/2403.01632 +[67] Pawel Urzyczyn. 1997. Inhabitation in Typed Lambda-Calculi (A Syntactic Approach). In TLCA (Lecture Notes in Computer Science). https://doi.org/10.1007/3-540-62688-3_47 +[68] Heidi Vella. 2024. Google turns to AI to write new code; Workforce reduced. https://aibusiness.com/data/google-turns-to-ai-to-write-new-code-workforce-reduced +[69] Yuxiang Wei, Chunqiu Steven Xia, and Lingming Zhang. 2023. Copiloting the Copilots: Fusing Large Language Models with Completion Engines for Automated Program Repair. In ESEC/FSE. https://doi.org/10.1145/3611643.3616271 +[70] Martin Weyssow, Xin Zhou, Kisub Kim, David Lo, and Houari A. Sahraoui. 2023. Exploring Parameter-Efficient Fine-Tuning Techniques for Code Generation with Large Language Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.10462 +[71] Brandon T. Willard and Rémi Louf. 2023. Efficient Guided Generation for Large Language Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2307.09702 + +[72] Andy Yang, David Chiang, and Dana Angluin. 2024. Masked Hard-Attention Transformers Recognize Exactly the Star-Free Languages. In NeurIPS. 
http://papers.nips.cc/paper_files/paper/2024/hash/13d7f172259b11b230cc5da8768abc5f-Abstract-Conference.html
Moreover, if both $A_{X}$ and $A_{Y}$ are prefix automata, this also holds for $A_{X} \cup A_{Y}$ . + +Concatenation. For the concatenation automaton $A_{XY}$ , we define $I \coloneqq I_X$ , $F \coloneqq F_Y$ , and the transition function as follows: + +$$ +\delta_ {X Y} (q, c) := \left\{ \begin{array}{l l} \delta_ {X} (q, c) & \text {i f} q \in Q _ {X} \backslash F _ {X} \\ \delta_ {X} (q, c) \cup \delta_ {Y} (I _ {Y}, c) & \text {i f} q \in F _ {X} \\ \delta_ {Y} (q, c) & \text {i f} q \in Q _ {Y}. \end{array} \right. +$$ + +Informally, concatenation preserves the parsing behavior of both $A_{X}$ and $A_{Y}$ in their respective states. When $A_{XY}$ reaches an accepting state of $A_{X}$ and receives another input character, it either remains in $A_{X}$ or transitions to $A_{Y}$ , as defined in the second case of $\delta_{XY}$ . Essentially, this maintains outgoing edges from accepting states in $A_{X}$ while adding edges from these accepting states to initial states of $A_{Y}$ . + +It follows from a similar argument that $L(A_{XY}) = L(A_X) \circ L(A_Y)$ , where $L(A_X) \circ L(A_Y)$ is defined as $\{s_X \circ s_Y \mid s_X \in L(A_X), s_Y \in L(A_Y)\}$ . We first show $L(A_{XY}) \subseteq L(A_X) \circ L(A_Y)$ . Due to (P1), we can always split any $s \in L(A_{XY})$ into $s_X$ that extends from $I_X$ to $F_X$ and $s_Y$ that extends from $I_Y$ to $F_Y$ . Then $s_X \in L(A_X)$ and $s_Y \in L(A_Y)$ . For $L(A_X) \circ L(A_Y) \subseteq L(A_X \circ A_Y)$ , we pick any $s_X \circ s_Y$ from $L(A_X) \circ L(A_Y)$ and parse it using $A_{XY}$ . We observe that it will first traverse from $I_X$ to $F_X$ consuming $s_X$ , and then transition through $I_Y$ to $F_Y$ by consuming $s_Y$ . + +Moreover $A_{XY}$ is a prefix automaton, if $A_{X}$ and $A_{Y}$ are prefix automata and $L(A_{Y}) \neq \emptyset$ . Since $A_{X}$ is a prefix automaton, we can reach $F_{X}$ from any state in $Q_{X}$ . From $F_{X}$ we additionally reach + +$I_{Y} \subseteq Q_{Y}$ . 
Since $A_{Y}$ is a prefix automaton, we can reach $F_{Y}$ for any state in $Q_{Y}$ . This construction is a prefix automaton only if $I_{Y} \neq \emptyset$ , which, due to the prefix property, is equivalent to $L(A_{Y}) \neq \emptyset$ . + +Kleene-Star. We define the Kleene-Star automaton $A_{\overline{X}}$ that parses indefinite repetitions of words accepted by $X$ . First, we consider all initial states as final states, i.e., we ensure $I_X \subseteq F_{\overline{X}}$ . Then we add transitions to the transition function $\delta_X$ from the final states $F_X$ back to the initial states $I_X$ . + +$$ +\delta_ {\overline {{X}}} (q _ {X}, c) := \left\{ \begin{array}{l l} \delta_ {X} (q _ {X}, c) & \text {i f q \not \in F _ {X}} \\ \delta_ {X} (q _ {X}, c) \cup \delta (I _ {X}, c) & \text {i f q _ {X} \in F _ {X}}. \end{array} \right. +$$ + +We can quickly see that $L(A_{\overline{X}}) = \{\overline{s} \mid s \in L(A_X)\}$ , with the same argument as the concatenation automaton. Additionally, because the initial states are accepting, the empty word (zero repetitions) is in $L(A_{\overline{X}})$ . We similarly see that this is a prefix automaton if $A_{X}$ is a prefix automaton. Note that here $L(A_{X}) \neq \emptyset$ is not required. This is because if $L(A_{X}) \neq \emptyset$ , then $A_{\overline{X}} = A_{X} = A_{\emptyset}$ , which is still a prefix automaton. + +Terminals. The terminal automaton $A_{\mathsf{S}}$ parses exactly the terminal S. They accept the usual alphabet $\Sigma$ and feature the states $Q \coloneqq \{q_{\mathsf{s}} \mid \mathsf{s} \text{ is a suffix of S}\}$ , $F \coloneqq \{q_{\varepsilon}\}$ , $I \coloneqq \{q_{\mathsf{S}}\}$ . The transition function $\delta$ is defined as follows: + +$$ +\delta (q _ {s}, c) := \left\{ \begin{array}{l l} \{q _ {s ^ {\prime}} \} & \text {i f c \circ s ^ {\prime} = s} \\ \varnothing & \text {o t h e r w i s e .} \end{array} \right. +$$ + +Clearly $A_{\mathfrak{S}}$ is a prefix automaton. 
We can show inductively that for any $s: \gamma(q_{s}, s') = \{q_{\varepsilon}\} \Longleftrightarrow s = s'$ , and thus $L(A_{\mathfrak{S}}) = \{\mathfrak{S}\}$ . With a simple modification, we introduce $A_{\mathfrak{s}}^{W}$ , where $W$ denotes whitespace characters. The transition function is defined as $\delta(q_{\mathfrak{s}}^{W}, c) := \{q_{\mathfrak{s}}^{W}\}$ if $c \in W$ ; otherwise, $\delta(q_{c \circ s}^{W}, t) := \{q_{\mathfrak{s}}^{W}\}$ . This allows arbitrary whitespace before parsing $s$ . This is how we implement syntactic indifference to whitespace between terminals. + +# A.2 Expressions + +Expressions are parsed using recursive automatons as introduced in §3.4. In this part of the Appendix, we describe in more detail how information is passed between states. + +Notation. In the following, we will implicitly assume that $\delta(q, c) = \emptyset$ if not explicitly defined otherwise, making notation more concise. For any state, we access the following information through dot notation or the special notation on the state, which we assume is passed to subsequent states through the transition function (unless otherwise stated). This information is alternatively passed through to entire automata in composite automata, e.g., in $A_{XY}$ from $A_X$ to $A_Y$ . + +- $q \in F_X$ : Whether state $q$ is an accepting state of the automaton $A_X$ . +- $q. \Gamma$ : The type environment based on state $q$ currently being parsed. +- $q$ .LHS: The left-hand side expression of an extending expression represented by state $q$ , i.e., when extending $X$ with $Y$ and currently parsing $q_{Y}$ , then $q_{Y}$ .LHS = $X$ . +- $q$ .TYP: The described type of the last coherent expression that this state belongs to. This is only defined for accepting states. Generally, we ensure that when some expression $e$ was parsed, the corresponding state $q_{e}$ has attribute $q_{e}$ .TYP such that $q_{e} \Gamma \vdash e : q_{e}$ .TYP. 
+- $q \downarrow T$ : Type $T$ to which state $q$ is constrained. + +When accessing the properties of $A$ , we access the property of the current state of the automaton $q \in Q$ , e.g., $A. \mathrm{LHS} = q. \mathrm{LHS}$ . For parsed automata, the current state is the final, accepting state. The TYP attribute expresses the type of the expression parsed so far. In expression states $q$ , we leverage the LHS to accurately determine $q. \mathrm{TYP}$ . + +$$ +\begin{array}{l} q _ {\mathrm {S T R . T Y P}} := \text {s t r i n g} \quad q _ {(e) . \mathrm {T Y P}} := A _ {e}. \mathrm {T Y P} \\ q _ {\text {N U M}}. \text {T Y P} := \text {n u m b e r} \quad q _ {\odot e}. \text {T Y P} := R, \text {f o r} q _ {\odot e}. \text {L H S}. \text {T Y P} = S, A _ {e}. \text {T Y P} = T \text {a n d} S \odot T: R \\ q _ {\text {B O O L . T Y P}} := \text {b o o l e a n} \quad q _ {(\overline {{e}}). \text {T Y P}} := T, \text {f o r} q _ {(\overline {{e}}). \text {L H S . T Y P}} = (\overline {{p}}) \Rightarrow T \\ q _ {x. \mathrm {T Y P}} := T \text {w h e r e} q _ {x}. \Gamma \vdash x: T \quad q. n. \mathrm {T Y P} := T, \text {f o r L O O K U P} (q. n. \mathrm {L H S . T Y P}, n) = T \\ q _ {(\overline {{p}}) = > e. T Y P} := \left(A _ {\overline {{p}}} ^ {-}. T Y P\right) = > A _ {e}. T Y P \\ \end{array} +$$ + +Unrestricted Expressions. The left-hand side of the currently parsed expression is used in the definition of automata for three extending expressions; arithmetic operators, function call, and member access. The arithmetic operator automaton constrains its states to those with valid operators, i.e.: + +$$ +A_{\odot e}:= \bigcup_{\exists R:A_{\odot e}.LHS.TYP\odot T = R}A_{\odot}(\circ A_{e}\downarrow T). +$$ + +For function call, the automaton is only valid if the left-hand side is a function, and accepts only the valid signature. + +$$ +A _ {(\overline {{e}})} := \left\{ \begin{array}{l l} A _ {(} \circ (A _ {\overline {{e}}} \downarrow A _ {\overline {{p}}}. 
\mathrm {T Y P}) \circ A _ {)} & \text {i f} A _ {(\overline {{e}}). \mathrm {L H S . T Y P}} = (\overline {{p}}) \Rightarrow T \\ A _ {\emptyset} & \text {o t h e r w i s e .} \end{array} \right. +$$ + +Finally, the member access automaton is a union of the automata that parses the attributes of the left-hand side expression. Or formally, + +$$ +A_{\cdot n}:= \bigcup_{\exists T:\text{LOOKUP}(A_{\cdot n}.LHS.TYP},m) = T}A_{\cdot \mathfrak{m}}. +$$ + +Type-Restricted Expressions. The type-restricted versions of the automata are covered by the definitions presented in §3.4. We therefore do not separately list them here. + +# A.3 Pruning the Type Search + +We now present our heuristic for pruning the type search recursion from the prefix automaton for type-constrained expressions in §3.4, i.e., our implementation of PRUNESEARCH at Line 6 of Algorithm 2. The heuristic is based on the complexity and novelty of candidate types to explore. + +Based on the assumptions about the lookup function and the extension expressions in §3.1, we observe a restriction in the reachable types by extensions: from any given type, we reach itself, result types of arithmetic operators via op, return types through CALL, and member types through MEMBER. A higher-order type $(\cdot) \Rightarrow T$ does not allow access to types not reachable from $T$ . Consequently, we avoid exploring such higher-order types unless the target type is of higher order, or the higher-order type offers novel, yet unexplored types. For instance, in Figure 11, the type $(\cdot) \Rightarrow$ number is not explored, because it is more complex than both the initial and goal types, number and string, and does not contain any unexplored type. Meanwhile, $(\cdot) \Rightarrow$ string is explored, as it contains a novel string type. + +To formalize this understanding, we introduce the concepts about the depth and root types of a given type, denoted as $\text{DEPTH}$ and $\text{ROOT}$ , respectively. 
$\text{DEPTH}$ measures the complexity of a type, specifically the order of a function, while $\text{ROOT}$ returns all types of minimal depth (e.g., string, number, and boolean) that constitute a higher-order type. They are defined as follows: + +$$ +\operatorname {D E P T H} (T) := \left\{ \begin{array}{l l} \operatorname {D E P T H} (S) + 1 & \text {i f} T = (\overline {{p}}) \Rightarrow S, \\ 0 & \text {o t h e r w i s e .} \end{array} \right. \qquad \operatorname {R O O T} (T) := \left\{ \begin{array}{l l} \operatorname {R O O T} (S) & \text {i f} T = (\overline {{p}}) \Rightarrow S, \\ \{T \} & \text {o t h e r w i s e .} \end{array} \right. +$$ + +We leverage DEPTH and ROOT to implement PRUNESEARCH $(T,G,S)$ for a current type $T$ , a goal type $G$ , and a type $S$ after an extension is applied on $T$ . In general, if $G$ is not directly accessible + +![](images/e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg) +Figure 11. An example search through the graph for type reachability, starting from $T =$ number with the goal string, e.g., after parsing let x : string; $x = 1$ . States and edges along the final path are marked in green and explored nodes in blue. The () => number node is not explored, as complex types are avoided by our heuristic. The node () => string is explored as it enables reaching new type string. + +from $T$ , it will also not be accessible from expressions with the same root types but greater depth, such as $() \Rightarrow T$ . When $G$ is of higher order, exploring up to the depth of $G$ can be required, such as when $G = () \Rightarrow ((.) => \text{number})$ . Based on these two ideas, we stop exploring $S$ when $\text{DEPTH}(S) > \max(\text{DEPTH}(G), \text{DEPTH}(T))$ . + +Further, if a higher-depth function returns an unexplored type, we need to explore it. Sticking to the example in Figure 11, type number has the member toString of type () => string. 
The type string can only be reached by exploring the member access at depth 1. On the contrary, we do not explore a higher-depth function if it does not introduce novel types other than those explored. To achieve this, we adapt Algorithm 2 to additionally define a set of root types $R$ , which is initialized to an empty set and is updated by $R := R \cup \mathrm{root}(T)$ . We do not explore $S$ if $\mathrm{root}(S) \subseteq R$ . + +Taking the conjunction of the aforementioned two aspects, our pruning heuristic is implemented as PRUNESEARCH $(T,G,S) \coloneqq \mathrm{DEPTH}(S) > \max(\mathrm{DEPTH}(T), \mathrm{DEPTH}(S)) \wedge \mathrm{ROOT}(S) \subseteq R$ . The restrictions based on depth and root types are based on the results of the rigorously analyzed search over succinct types by Gvero et al. [30]. This provides a robust heuristic for exploring as many relevant inhabitable types as possible. However, due to the additional complexity introduced by the lookup function, we can not guarantee completeness and instead refer to the strong empirical results in our evaluation in §5 as evidence of the search's high coverage. + +# A.4 Implementation of DERIVABLE + +Recall that in Table 1, DERIVABLE for function expressions are defined as: $\mathrm{DERIVABLE}(q_{(\overline{p})\Rightarrow e})\coloneqq \{(\overline{p})\Rightarrow T\mid \mathrm{REACHABLE}(\mathrm{DERIVABLE}(q_e),T)\}$ . This involves constructing a type reachability graph and collecting all types $T$ reachable from DERIVABLE $(q_{e})$ . However, this process is intractable because $T$ can be of arbitrarily high-order, as such there are infinitely many $T$ to explore. A similar issue exists for grouped expressions, as their DERIVABLE function is also defined to enumerate reachable types. We introduce two optimization heuristics to address this problem. 
+ +We first observe that DERIVABLE is always called within the context of an invocation of REACHABLE with target type $G$ , e.g., REACHABLE(DERIVABLE(q(\overline{p}) => e), G) for function expressions. To compute DERIVABLE(q(\overline{p}) => e), we enumerate all types present on the type graph represented by REACHABLE(DERIVABLE(q_e), G), which is finite due to application of the pruning heuristics in Appendix A.3. In other words, we bound the maximum complexity of considered types $T$ using the pruning heuristic for reachability of target type $G$ . This leads to a sound but potentially incomplete version of DERIVABLE. However, since the final goal is to reach $G$ , this heuristic provides a practically useful set of all relevant derivable types. + +Second, we observe that the resulting two-tiered call REACHABLE( DERIVABLE $(q_{(\overline{p})} \Rightarrow e)$ , $G$ ) can be integrated into a single call to further reduce the amount of explored types. Concretely, when discovering some type $M$ in REACHABLE( DERIVABLE $(q_e)$ , $G$ ), as per the previous heuristic, we allow transitioning directly to REACHABLE $(\overline{p}) \Rightarrow M, G$ to allow a depth-prioritizing exploration of the search graph. This allows us to efficiently discover a path to $G$ if it exists. + +# A.5 Statements + +We define the remaining automata to capture the complete language from §3.1. To correctly handle function return types, we pass on related information when entering function bodies: + +- $q.R$ : The expected return type of the current state $q$ . +- $q.$ RETURNED: Whether the currently parsed program block has returned in all branches. +- q.MUSTRETURN: Whether the currently parsed program block must return (i.e., If-Then-Else branches do not need to contain return statements even if a return type is expected of the surrounding code block). 
+ +The single statement automaton is another recursive definition, since some statements, e.g., If-Then-Else, can themselves contain statements. The statement automaton is defined recursively as $A_{s} \coloneqq A_{\mathrm{DECL}} \cup A_{\mathrm{EXPR}} \cup A_{\mathrm{RET}} \cup A_{\mathrm{BLOCK}} \cup A_{\mathrm{FUN}} \cup A_{\mathrm{ITE}}$ . The expression statement automaton and block automaton are simply defined as $A_{\mathrm{EXPR}} \coloneqq A_{e}$ ; and $A_{\mathrm{BLOCK}} \coloneqq A_{\{\overline{s}\}}$ . The declaration automaton $A_{\mathrm{DECL}} \coloneqq A_{\mathrm{let} x:T}$ ; captures variable names $x$ using an automaton for non-existing identifiers, which works the same way as $A_{x}$ except that it rejects terminals that match an existing variable. This automaton is a prefix automaton as well, since indefinite additional characters can be added to the variable name and there are only finitely many defined variables. The If-Then-Else automaton is defined using standard concatenation: $A_{\mathrm{ITE}} \coloneqq A_{\mathrm{if}(e) s \text{else}s}$ . The statements automaton $A_{\overline{s}}$ , based on the Kleene-Star automaton definition and the single statement automaton. Return statements are only non-empty when the expected return type is set, i.e. when parsing inside a function: + +$$ +A _ {\mathrm {R E T}} := \left\{ \begin{array}{l l} A _ {\mathrm {r e t u r n}} \circ A _ {e} \downarrow T & \text {i f} A _ {\mathrm {R E T}}. R = T \\ A _ {\emptyset} & \text {o t h e r w i s e .} \end{array} \right. +$$ + +For functions, the automaton is based on the standard concatenation $A_{\text{FUN}} \coloneqq A_{\text{function } x(\overline{p}):T(\overline{s})}$ . However, the transition function updates the states of the statement automata inside the function: + +- $q.R \coloneqq T$ , i.e., the return type of these statements is set to the return type of the function. This value is propagated recursively to all sub-automata. 
+- $q$ .MUSTRETURN := true, for the outermost statement block automaton. It is set to false for deeper nested statement blocks and as soon as a parsed statement $X$ has $q_{X}$ .RETURNED set to true - i.e. one of the main body statements returned in every branch. +- $q. \text{RETURNED} :=$ false, per default in every statement, except a) in return automata, b) inside a multi-statement automaton where the previous statement has RETURNED = true and c) in ITE-automata where both branching statements have RETURNED = true. + +As long as a state $q$ in a multi-statement automaton has $X. \text{RETURNED} = \text{false}$ and $q. \text{MUSTRETURN} = \text{true}$ , it can not accept but instead forces the generation of another statement. Since we can always express the requested type through literals and can always generate a return statement to fulfill this requirement, the prefix automaton property is not violated. + +# B Details about Experimental Evaluation + +In this section, we detail how executable code is extracted from the model responses and a slight modification to the decoding algorithm used, that increases throughput heuristically. + +Implementation Details. We have two main external dependencies. To implement the regular-expression-based literal automata, we leverage the regex library, as it allows checking if the current string can be completed to match a regular expression. To implement LLM inference, we leverage the transformers library. We provide an exhaustive list of supported and unsupported features of the TypeScript language in our final implementation in Tables 5 and 6, respectively. + +Hyperparameters. We run the models on A100 NVidia GPUs with 80 GB of VRAM and CUDA version 12.4. We set the sampling temperature to 1. We set seeds to 0 to 4 on the four HumanEval runs and 0 on the one MBPP run, respectively. We limit the completions to 1000 tokens and time out after 300 seconds. 
We compute syntactic correctness using the Oxidation toolchain [52] as the official TypeScript compiler does not clearly distinguish between syntactic and semantic errors. + +Excluded MBPP Instances. We discovered that a number of TypeScript translations in the MultiPL-E dataset [13] contained invalidly generated nested tuples. After reporting them to the developers, they have been resolved in the latest version of MBPP and we include them in our evaluation. Still, we find that the TypeScript translation of a number of MBPP instances contains too broad type annotations, annotating elements as any or array of any. We therefore exclude the following 6 instances from the evaluation: + +- mbpp_405_check_tuplex +- mbpp_612_merge +- mbpp_563_extract_values +- mbpp_725_extract_quotation +- mbpp_580_extract_even +- mbpp_791_remove_nested + +Complete Prompts. We provide the complete LLM prompts for our evaluated tasks (synthesis, translation, and repair) in Figures 12-14. The prompts are templates, instantiated with instructions specific to each task and problem instance. If system prompts are not available for a given LLM, we prepend the system prompt to the first user prompt. The model completion starts from a pre-filled function signature, enabling unified unit testing. For the repair prompt, we add the non-compilable model output as assistant output and use a second turn to pass back compiler outputs. Compiler errors contain line numbers for localization, so we annotate the output with line numbers. We find that Qwen2.5 32B tends to always generate test cases, which leads to errors during compilation. We therefore append the sentence Do not include test cases in the code. to its prompt. + +Extracting Output Code. Given our prompts, LLMs are expected to output the resulting programs. However, they often produce additional outputs, such as generated test cases and explanations. Now we describe our heuristics for extracting the generated code.
We first extract the corresponding TypeScript code block (i.e., ``` typescript`, or do not cut off if the block is not closed. Inside the code block, we cut off after the closing curly brace of the last balanced pair of curly braces, if it is followed by a newline or semicolon. This determines the last statement block generated, and avoids cutting off, e.g., inside a template literal. Again, if no such case is found, we do not prune the output. We demonstrate the operation of our cutoff heuristics in Figure 15. + +# C Case Study Full Outputs + +In §5.4, we present the shortened versions of three qualitative examples showcasing the effectiveness of our approach. In Figures 16-18, we provide the full code outputs of these examples, with detailed descriptions in the respective captions. + +Table 5. Supported TypeScript features. + +
Supported TypeScript FeaturesExamples
Expressions, Statements, Function Declarations(LB as introduced in §3)
Additional Literals: BigInt, Regex, Template Strings10n, /\d*, 'hello ${user}'
Additional Types: void, null, undefinedvoid, undefined, null
Index Signature Types and Literalslet x: {{y: number}: string} = 1: "hi";
Anonymous Functionsfunction(): bool {return true}
Lambda Functions with and without Function Bodiesx => {return y}, x => y
Ternary and Logic Operators? :, ||, &&
Arithmetic and Boolean Operations+, -, **, &, !
Assigning Pre-and Postfix Operators++, --
Arrays[1, 2, 3]
Access and Assignment to Computed Membersx[10] = y[i];
Constructors and "new" Callslet x = new Number(1);
Calls with Optional and Rest Parametersfunction foo(x?: number, y...: string)
Sets and MapsMap<string, number>}()
Parameterized Constructor Callsnew Set<string>}()
Tupleslet x: [int, string] = [1, "hello"];
Optional Chainingx.get("hi").get("world")
Spread Operator[...xs]
Type Assertions"hello" as any
For Loopsfor(int x = 0; i < 10; i++)
For Of Loopsfor(x of xs)
For Of Loops with Tuple Destructuringfor([x, y] of xys)
Do-While and While Loopswhile (true) {...}
Typed and Untyped Variable Declarationslet x: number = 1; let y = 100;
Comments, Multiline Comments// Comment
Returning without Expressionsreturn;
Try-Catch Statements with a Fixed Exception Typetry {...} catch (e) {...}
Throw Statementsthrow new Error("..."
Importing the crypto Libraryrequire("crypto")
Global Scope ObjectsMath, parseInt
Automatic Semicolon Insertion
+ +Table 6.Unsupported TypeScript features. + +
Missing FeaturesExamples
General Library Importsrequire("example")
Use of Functions Before Declaration
For In Loopsfor(x in y)
Type Declaration
User-Defined Classes
Declaration and Parameterized Call of General Parameterized Functions
Destructuring Assignment[x, y] = z
Uninitialized, Unannotated Variable Declarationslet x;
Return Type Inference
Literal Types
Enumerables
Symbols
+ +```typescript +System: +You are an expert in TypeScript programming. Solve the given problem by writing solution code in TypeScript. When answering, insert the solution code in a \*\*typescript... block. Do not include test cases in the code.. +User: +Check if in given array of numbers, are any two numbers closer to each other than given threshold. +>>> has_close_elements([1.0, 2.0, 3.0], 0.5) +false +>>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) +true function +Assistant: +``~typescript +function has_close_elements(numbers: number[], threshold: number): boolean { +``` + +Figure 12. The full prompt for the synthesis task. Text in green is based on the problem instance, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases. + +```python +System: +You are a helpful and expert programmer in Python and TypeScript. You will be given an input program in Python and your task is to translate this program into TypeScript. You may assume that the input program is correct and that the translation should be semantically equivalent. When answering, insert the solution code in a \*\*typescript... block. Do not include test cases in the code.. +User: +The following is the source program in Python: +``python +from typing import List +def has_close_elements(numbers: List[float], threshold: float) -> bool: for idx, elem in enumerate(numbers): for idx2, elem2 in enumerate(numbers): if idx != idx2: distance = abs(elem - elem2) if distance < threshold: return True return False +Please translate the source program to TypeScript. +Assistant: +``'typescript +function has_close_elements(numbers: number[], threshold: number): boolean { +``` + +Figure 13. The full prompt for the translation task. Text in green is based on the problem instance, blue is the original Python canonical solution, in this case HumanEval #0. 
The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases. + +System: + +You are an expert in JavaScript programming. Solve the given problem by writing solution code in JavaScript. When answering, insert the solution code in a ````````````````````````````````````````````````````````````````````````````````````````````````````` + +User: + +```txt +Given an array of positive integers x. return a sorted array of all elements that hasn't any even digit. +Note: Returned array should be sorted in increasing order. +For example: +>>unique_digits([15,33,1422,1]) +[1, 15, 33] +>>unique_digits([152, 323, 1422, 10]) +[] +``` + +Assistant: + +```typescript +001: function unique_digits(x: number[]) : number[] { +002: const filteredArray: number[] = []; // initialize empty array +003: for (let i = 0; i < x.length; i++) { +004: let strx = "" + x[i]; +005: let filteredStr = strx +006: .ToString() +007: .split(""); +008: .filter((digit) => parseInt(digit) % 2 != 0); +009: if (filteredStr.length > 0) { +010: filteredArray.push(...filteredStr); +011: } +012: } +013: return filteredArray.sort((a, b) => a - b); +014: } +``` + +User: + +```txt +This output produced an error: +../../../.././tmp/tmpaewlckfv.ts(10,26): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'. +``` + +Write the program again, and make sure to fix the error this time. + +Assistant: + +```typescript +``` +```script +function unique_digits(x: number[]) : number[] { +``` + +Figure 14. The full prompt for the repair task, where we simulate a multi-turn interaction. Text in green is based on the problem instance. blue is non-compilable model output from the synthesis step, in this case HumanEval #104 and Gemma 2 2B. red is compiler feedback. Generations from the synthesis step are annotated with line numbers to aid localizing the compiler error. 
+ +```txt +``` +``` +function next_smallest PALINDrome(num: number): number { + const numStr = num.toString(); + const reversedNumStr = numStr.splitAGING).reverse().joinAGING); + let min = Math.min(Number(numStr), Number(reversedNumStr)); +} +if (min === Number(numStr)) { + return Number(numStr); +} +else { + return parseInt(min.toString() + 1); +} +} +} +const findNextSmallestPalindrome = next_smallest PALINDrome(12321); +console.logfindNextSmallestPalindrome); // Output: 12322 +} +**Explanation** +The function takes a number as input (^num>) in the given criteria. +``` + +Figure 15. The code generated by Gemma 2 2B for MBPP #0. The cutoff algorithm first extracts the fenced code, last line marked in blue. To avoid the usage of the console object from the browser extension of TypeScript in the generated examples, we further cut off after the last code block, marked in red. + +```typescript +1 function select_words(s: string, n: number): string[] { +2 const result: string[] = []; +3 for (const word of s.split()) { +4 let nConsonants = 0; +5 for (let i = 0; i < word.length; i++) { +6 const char = word[i].toLowerCase(); +7 if (!['a', 'e', 'i', 'o', 'u'].includes(char)) { +8 nConsonants++; +9 } +10 } +11 if (nConsonants === n) { +12 result.push(word); +13 } +14 } +15 return result; +16 } +``` + +(a) The code generated by the unconstrained Gemma 2 2B when translating a Python solution. The model tries to call the split member of a string object without parameters. This is legal in Python but not allowed in TypeScript. 
+ +```typescript +1 function select_words(s: string, n: number): string[] { +2 const result: string[] = []; +3 for (const word of s.split()) { +4 let nConsonants = 0; +5 for (let i = 0; i < word.length; i++) { +6 const letter = word[i].toLowerCase(); +7 if (!['a', 'e', 'i', 'o', 'u'].includesletter)) { +8 nConsonants++; +9 } +10 } +11 if (nConsonants === n) { +12 result.push(word); +13 } +14 } +15 return result; +16 } +``` + +(b) The code generated by Gemma 2 2B with type constraints. The constraints guide the model and help it generate the required argument to split, which determines the characters around which to split the string. + +Figure 16. Comparison between the function generated by unconstrained Gemma 2 2B [64] and the code generated with the guidance of type constraints on the problem select_words in HumanEval [14]. This problem requires a function that selects words from a given list that contain a specified number of consonants. + +```typescript +function largest_divisor(n: number): number { +for(let i = n - 1; i > 0; i--) { +if(n % i == 0) { +return i; +} +} +} +``` + +(a) The code generated by the unconstrained Deepseek Coder 33B Instruct. The function loops over all numbers smaller than $n$ to determine the largest divisor. Even though functionally correct for positive numbers, this function returns undefined when the return statement in the loop is never called (e.g., for negative inputs), violating the type system rules. + +```typescript +function largest_divisor(n: number): number { +for(let i = n - 1; i > 0; i--) { +if(n % i == 0) { +return i; +} +} +return 1; // Return 1 if no such divisor found +} +``` + +(b) The code generated by DeepseekCoder 33B Instruct with type constraints. The system ensures that every path returns a value matching the annotated type. This forces the LLM to generate another line after the loop, and the model correctly chooses to generate a fallback return statement. + +Figure 17. 
Comparison between the function generated by the unconstrained DeepSeekCoder 33B Instruct [28] and the code generated with the guidance of type constraints on the problem largest_divisor in HumanEval [14]. The problem asks to compute the largest divisor of a given number. + +```typescript +function intersperse(numbers: number[], delimiter: number): number[] { +return numbers.reduce((acc, curr, index) => { +acc.push(curr); +if (index < numbers.length - 1) { +acc.push(delimeter); +} +return acc; +}, []); +} +``` + +(a) The code generated by the unconstrained Gemma 2 9B. The code uses the reduce member of arrays in TypeScript. The reduce member is a generic function that accepts a callback as first argument. This callback has three parameters: an accumulator, a current value and an index. The types of the second and third parameter are fixed to the element type of the callee array and number. The first argument type of the callback function is automatically inferred from the type of the second argument to the reduce function, in this case an empty array. The type of this array is never [], a special type denoting arrays without members. Since we can assign this type to the callee type number[], the accumulator parameter obtains type never[]. However, calling push on this type results in a type violation as the pushed value of type number is not compatible with the inferred element type never. This issue is well known in TypeScript and even human developers frequently stumble upon this issue [47, 48]. + +```typescript +function intersperse(numbers: number[], delimiter: number): number[] { +return numbers.reduce((acc: number], num: number, index: number) => { +if (index > 0) { +acc.push(delimeter); +} +acc.push(num); +return acc; +}, []); +} +``` + +(b) The code generated by Gemma 2 9B [64] with type constraints. 
We force the model to generate type annotations for the callback function in reduce and thus enable the model to generate the correct type annotation and avoid the issue in the unconstrained code. + +Figure 18. Comparison between the function generated by the unconstrained Gemma 2 9B [73] and the code generated with the guidance of type constraints on the problem intersperse in HumanEval [14]. The task in this problem is to insert a delimiter number between consecutive elements of an input list. \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09246/images/0c4baffe0fc9edc7385e27cc94d0c45ae5838eae71ab014be51ed0f83954a62d.jpg b/data/2025/2504_09xxx/2504.09246/images/0c4baffe0fc9edc7385e27cc94d0c45ae5838eae71ab014be51ed0f83954a62d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d231d8d020efcdbbed7448245af0cbd3fd6ebee --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/0c4baffe0fc9edc7385e27cc94d0c45ae5838eae71ab014be51ed0f83954a62d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76d5c5553a1762945c45d1fc0341fb301eeb2d4bf0911b41d9fe09799e51c768 +size 172267 diff --git a/data/2025/2504_09xxx/2504.09246/images/17dc9774d0b942f13d1da1e6a3601e40b9e4be909052556ce334fd429a6d1c28.jpg b/data/2025/2504_09xxx/2504.09246/images/17dc9774d0b942f13d1da1e6a3601e40b9e4be909052556ce334fd429a6d1c28.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b351722862550a793705eb1bc7eba93a6c0c0fa --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/17dc9774d0b942f13d1da1e6a3601e40b9e4be909052556ce334fd429a6d1c28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac45c924b6dc928334706db993af864c35dac55464a1282b1fa4b75941a5efd4 +size 15335 diff --git a/data/2025/2504_09xxx/2504.09246/images/1f1a04cc743632903100ca0c76f3e776b8f11d465997df71f1a7f2b79743c678.jpg b/data/2025/2504_09xxx/2504.09246/images/1f1a04cc743632903100ca0c76f3e776b8f11d465997df71f1a7f2b79743c678.jpg 
new file mode 100644 index 0000000000000000000000000000000000000000..b704bb31c32f1d3e6f165d54226c00d383f81fa3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/1f1a04cc743632903100ca0c76f3e776b8f11d465997df71f1a7f2b79743c678.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b14b547249675c700a6aca2f57436e8d5c362fd62406e955a0fdaf9d6134ba7b +size 51762 diff --git a/data/2025/2504_09xxx/2504.09246/images/2fd0789c478629059061d1f04d2504d5c39529783b9438d88c243ed727ac7a60.jpg b/data/2025/2504_09xxx/2504.09246/images/2fd0789c478629059061d1f04d2504d5c39529783b9438d88c243ed727ac7a60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7aaa3cf0d108abbba3c8dd16ba198749ec4231a3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/2fd0789c478629059061d1f04d2504d5c39529783b9438d88c243ed727ac7a60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d0d47f342f3f94b1efa71ff90c0495aa5771b1ef3b26246c6fe75d6dfeb3087 +size 10420 diff --git a/data/2025/2504_09xxx/2504.09246/images/3358439ee189d4f3ef4469308d320b9785a0a8f548a1598b1b5c41afddad2b06.jpg b/data/2025/2504_09xxx/2504.09246/images/3358439ee189d4f3ef4469308d320b9785a0a8f548a1598b1b5c41afddad2b06.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d7779f8e40c80b7699a80342ee0bb7cf9528de0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/3358439ee189d4f3ef4469308d320b9785a0a8f548a1598b1b5c41afddad2b06.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec8ad8808fb35e1cd0b54a12e294d94e02fd5a8d245130f5059259db5786bf2 +size 4988 diff --git a/data/2025/2504_09xxx/2504.09246/images/37ee7ea4b02c77c67357e62f6555bfa33bbc3c8da9616d5c45611df687113026.jpg b/data/2025/2504_09xxx/2504.09246/images/37ee7ea4b02c77c67357e62f6555bfa33bbc3c8da9616d5c45611df687113026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cec21a23fc666e3ec4b061e32641ec3febddd84 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09246/images/37ee7ea4b02c77c67357e62f6555bfa33bbc3c8da9616d5c45611df687113026.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aad06e4467324ae85fc85587bc6e72db0887dd773da1d6c9b97ffd80c429c52d +size 8979 diff --git a/data/2025/2504_09xxx/2504.09246/images/3d4a440dee5f7b23d9e7008f350194720276a69ffa55cf4cea128326c345decb.jpg b/data/2025/2504_09xxx/2504.09246/images/3d4a440dee5f7b23d9e7008f350194720276a69ffa55cf4cea128326c345decb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05b3b4f45edf495aeb97b35b018d4db6d75663b7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/3d4a440dee5f7b23d9e7008f350194720276a69ffa55cf4cea128326c345decb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ad95faf346d3be82c5a62a683c65b21dfc4dbe2688fa7563cbcdc16aa1b6828 +size 13011 diff --git a/data/2025/2504_09xxx/2504.09246/images/5061290a5981e83f5d5267b7bd48cef13eddf525ec623f2d9e3f58041b5fd498.jpg b/data/2025/2504_09xxx/2504.09246/images/5061290a5981e83f5d5267b7bd48cef13eddf525ec623f2d9e3f58041b5fd498.jpg new file mode 100644 index 0000000000000000000000000000000000000000..008deaea42e227085bfe87a7f31b05fab0688fe5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/5061290a5981e83f5d5267b7bd48cef13eddf525ec623f2d9e3f58041b5fd498.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c8e7f805cf834841a822646bb95d5436495c3a801fe1c90cdfd797097e79525 +size 13488 diff --git a/data/2025/2504_09xxx/2504.09246/images/5a33822d9ca45c5300fff9da50bd03a045de3900a76ddac386ac07c04f35eed1.jpg b/data/2025/2504_09xxx/2504.09246/images/5a33822d9ca45c5300fff9da50bd03a045de3900a76ddac386ac07c04f35eed1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..daf3c0365b9457c1bc4c18079975b3e8021bf0cf --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/5a33822d9ca45c5300fff9da50bd03a045de3900a76ddac386ac07c04f35eed1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8796c79e01e20d2a40bcfad3d40ce3ddc1fd8cae5464a22db05720ba9efd9d2c +size 13542 diff --git a/data/2025/2504_09xxx/2504.09246/images/60fa4e5789c10a64865475b022f3cb99f35909315ba98a7414da61c6223607ad.jpg b/data/2025/2504_09xxx/2504.09246/images/60fa4e5789c10a64865475b022f3cb99f35909315ba98a7414da61c6223607ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fa33a3bbf9eca9aa87d36fdbdb71e232250f0d1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/60fa4e5789c10a64865475b022f3cb99f35909315ba98a7414da61c6223607ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4d38dee5358b26c8a0655f847e698e2bcf826f8ac50c42785aaba2ddf1badda +size 11038 diff --git a/data/2025/2504_09xxx/2504.09246/images/638ff536adeb7add76a966ada67900a19a856f6068152936b40b9f10840cbc7e.jpg b/data/2025/2504_09xxx/2504.09246/images/638ff536adeb7add76a966ada67900a19a856f6068152936b40b9f10840cbc7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30941e1637fe23f9282b0aa338d9997809da8ecc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/638ff536adeb7add76a966ada67900a19a856f6068152936b40b9f10840cbc7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:168ec11639595f2b5f86feeb13a7816cd4a5888e791296aff4d5b453c457751c +size 69648 diff --git a/data/2025/2504_09xxx/2504.09246/images/72e91109dcf5c43d35127ada7978584ddc3a01d52f248a6f41efc03e3062ce21.jpg b/data/2025/2504_09xxx/2504.09246/images/72e91109dcf5c43d35127ada7978584ddc3a01d52f248a6f41efc03e3062ce21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fcd2c2de3a798c731600b8502c87fc2c50f503a1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/72e91109dcf5c43d35127ada7978584ddc3a01d52f248a6f41efc03e3062ce21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7a559440ce4a63d6998128e05545c0a63cb24bcab8e08bb7e729170065a90a9 +size 32102 diff --git 
a/data/2025/2504_09xxx/2504.09246/images/743d1ede4fcce2829efeee991f239fd26c6755ecff1cb142a39893bdf51e8ec9.jpg b/data/2025/2504_09xxx/2504.09246/images/743d1ede4fcce2829efeee991f239fd26c6755ecff1cb142a39893bdf51e8ec9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92836e17abe152502dfcd5c6316ba2e97a9ec553 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/743d1ede4fcce2829efeee991f239fd26c6755ecff1cb142a39893bdf51e8ec9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cadd0dc1291f754f59021cd368ba4626283e9ccc7c150608fadf8159d6df06c6 +size 26339 diff --git a/data/2025/2504_09xxx/2504.09246/images/7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg b/data/2025/2504_09xxx/2504.09246/images/7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f38233b976d40d985916b852b02f59422d3f37c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e939c3a7e066a148d69a436f5ed86f24ea897ba4eec26fd6ca8d408f510e9a9f +size 17607 diff --git a/data/2025/2504_09xxx/2504.09246/images/7bd7f054612c4b5ce3ec157716303576583113e6a0fcb2d75dc6408106b57761.jpg b/data/2025/2504_09xxx/2504.09246/images/7bd7f054612c4b5ce3ec157716303576583113e6a0fcb2d75dc6408106b57761.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca1caded9c7a70b0db897659b10a98d6af5a4e2e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/7bd7f054612c4b5ce3ec157716303576583113e6a0fcb2d75dc6408106b57761.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb88f9a3bec7bdaead9ede6a29ac554b22477a2de537637851c9e50a6e0f50eb +size 8521 diff --git a/data/2025/2504_09xxx/2504.09246/images/7e68d3859b1c3c3ce72398ab31c9f29439281fa842eacc7395389c0c754b5a7f.jpg 
b/data/2025/2504_09xxx/2504.09246/images/7e68d3859b1c3c3ce72398ab31c9f29439281fa842eacc7395389c0c754b5a7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76b7d46b42579f6e3fbd356c93d59c2aa71bdc19 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/7e68d3859b1c3c3ce72398ab31c9f29439281fa842eacc7395389c0c754b5a7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2672dc29c936ee2e72054733d66af0e87a90a59e8f66b833914ce354243ddd55 +size 27230 diff --git a/data/2025/2504_09xxx/2504.09246/images/813bece72a796c00b4947d62c4234cd08bdb1c79907a0935b2564c24da881c0e.jpg b/data/2025/2504_09xxx/2504.09246/images/813bece72a796c00b4947d62c4234cd08bdb1c79907a0935b2564c24da881c0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56c35a9c50710384a3f782e34cbcdbe2a147468e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/813bece72a796c00b4947d62c4234cd08bdb1c79907a0935b2564c24da881c0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3cbbf99802050a96b0ea47bc744f2fb14d2517f22f68768c92ccb763aeb61a0 +size 11321 diff --git a/data/2025/2504_09xxx/2504.09246/images/847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg b/data/2025/2504_09xxx/2504.09246/images/847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec5efbc54fe56e5236dfa8b4474eac13f6320829 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e416049203568b7c2ed7374189a003b8f2d27a32bee8333da2c56f52c43f483 +size 10928 diff --git a/data/2025/2504_09xxx/2504.09246/images/8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg b/data/2025/2504_09xxx/2504.09246/images/8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4d2e0166a7d88313c714e759c70b1dff109f8447 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0d87fa411510bc85198856ffe4a607a021d71957301096f3c75cae0b1092f64 +size 3268 diff --git a/data/2025/2504_09xxx/2504.09246/images/946af2491169f3cdaa486ef0392824c733f75141a4c823922253fc196de5d150.jpg b/data/2025/2504_09xxx/2504.09246/images/946af2491169f3cdaa486ef0392824c733f75141a4c823922253fc196de5d150.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77947f5c9bf784b67f52dc2794f2711b11ffc921 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/946af2491169f3cdaa486ef0392824c733f75141a4c823922253fc196de5d150.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ad456c0846a437c3a52d0623d043ea4bd2933f244532e48451fa8dc120775c7 +size 91514 diff --git a/data/2025/2504_09xxx/2504.09246/images/97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg b/data/2025/2504_09xxx/2504.09246/images/97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8624728b6a34dc2eea48dc3268aa6532f9dd1412 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ce3334af0bf5a4607bb395ada3aaa7bcfd096cb6771a4884a6d8ef5603349f7 +size 14869 diff --git a/data/2025/2504_09xxx/2504.09246/images/9a9fe5e21ca0fc4ade3648a646c2e9bf76dafd38627781c95101a0ee50d528d5.jpg b/data/2025/2504_09xxx/2504.09246/images/9a9fe5e21ca0fc4ade3648a646c2e9bf76dafd38627781c95101a0ee50d528d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d699f6fb87f03a8d576e8126f47ae759fadc87ab --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09246/images/9a9fe5e21ca0fc4ade3648a646c2e9bf76dafd38627781c95101a0ee50d528d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bada900ba758394e25ab0a241bdc576f8cbb97d1e9a6d124904e9a24682faf0c +size 6277 diff --git a/data/2025/2504_09xxx/2504.09246/images/9d74ba92a9fcd3e1568ae4c53fd0457a08b4f8d70c4687357fd52cd36b559d6b.jpg b/data/2025/2504_09xxx/2504.09246/images/9d74ba92a9fcd3e1568ae4c53fd0457a08b4f8d70c4687357fd52cd36b559d6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..90e3df7d658f006055d3444c66b568011b9605fe --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/9d74ba92a9fcd3e1568ae4c53fd0457a08b4f8d70c4687357fd52cd36b559d6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3398d2061e8605de95ab915a16935e419649aaa858b713efe98def8531f399a +size 12969 diff --git a/data/2025/2504_09xxx/2504.09246/images/9e6f17ee5a65303d2aa22c5b0747c5139f872d0dd3a0f6c386f946ab69f27555.jpg b/data/2025/2504_09xxx/2504.09246/images/9e6f17ee5a65303d2aa22c5b0747c5139f872d0dd3a0f6c386f946ab69f27555.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0bed1ccbacd12c17de734d788b396de14ed7f1f5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/9e6f17ee5a65303d2aa22c5b0747c5139f872d0dd3a0f6c386f946ab69f27555.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32ffe626b82bfa3d3d53ca8040dadbc4b2a763f758ca84154abe5fa5305b2611 +size 11472 diff --git a/data/2025/2504_09xxx/2504.09246/images/9fdda853962702543e4b74b4a4950ce8a8bb257496566563d2784d4fed07cc6e.jpg b/data/2025/2504_09xxx/2504.09246/images/9fdda853962702543e4b74b4a4950ce8a8bb257496566563d2784d4fed07cc6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53c9e0ba611ec89613b3cc0af2d0320a98a9ce38 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/9fdda853962702543e4b74b4a4950ce8a8bb257496566563d2784d4fed07cc6e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6c5bc302b016806cad53a101a700cf5cb21003c14d7ef2f2cd0e8402ab59017c +size 116232 diff --git a/data/2025/2504_09xxx/2504.09246/images/a0f7132e7a45261ca334925eb701aadbd125056868be897f704152471a35f2a3.jpg b/data/2025/2504_09xxx/2504.09246/images/a0f7132e7a45261ca334925eb701aadbd125056868be897f704152471a35f2a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d98b7b6fb7db354d4d066da447efc32172ce628e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/a0f7132e7a45261ca334925eb701aadbd125056868be897f704152471a35f2a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0262fbd75b260029bc4b11cd349c542a9376cdca4eabe72b7c974c208c777a44 +size 4542 diff --git a/data/2025/2504_09xxx/2504.09246/images/a30990f5fbd78a3d5bd03abb5291ed565e72139dbd4b176396fd845291662c23.jpg b/data/2025/2504_09xxx/2504.09246/images/a30990f5fbd78a3d5bd03abb5291ed565e72139dbd4b176396fd845291662c23.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d8d1d3dadc8b33520ee67ee5031507b6aaf0682 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/a30990f5fbd78a3d5bd03abb5291ed565e72139dbd4b176396fd845291662c23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6bf8585e2b755474fa0344c6d0665f45983b8a9496cdeb23d9f8549d9deb9b5 +size 69826 diff --git a/data/2025/2504_09xxx/2504.09246/images/aed795b7a573ef796c6718dfe29dd3f1629d86f6914bb2b22785860942121b53.jpg b/data/2025/2504_09xxx/2504.09246/images/aed795b7a573ef796c6718dfe29dd3f1629d86f6914bb2b22785860942121b53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e55d4d624b9f990a11bb866cd43035828f9053b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/aed795b7a573ef796c6718dfe29dd3f1629d86f6914bb2b22785860942121b53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2824995647bda280bf054be3bda8ce7fc97d601bd2bfd768490fe5bb80831ef5 +size 39338 diff --git 
a/data/2025/2504_09xxx/2504.09246/images/affe5abc4b21e199de8d5fd3ee6d500dc4622a2daba4cad2f072210df2eed967.jpg b/data/2025/2504_09xxx/2504.09246/images/affe5abc4b21e199de8d5fd3ee6d500dc4622a2daba4cad2f072210df2eed967.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a794a645d9ac6e883b6789624fabee9845c89298 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/affe5abc4b21e199de8d5fd3ee6d500dc4622a2daba4cad2f072210df2eed967.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16266397c629177099aaf1deeaedd59bfbd35650430fda87f12d78507c204642 +size 6467 diff --git a/data/2025/2504_09xxx/2504.09246/images/b8dd3da9566767a457ff25647916a8656a65423e1b578756cff759a72841a378.jpg b/data/2025/2504_09xxx/2504.09246/images/b8dd3da9566767a457ff25647916a8656a65423e1b578756cff759a72841a378.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21f5dd21d2f5ec44599cb1547f8b3f4e333373a9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/b8dd3da9566767a457ff25647916a8656a65423e1b578756cff759a72841a378.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1458be3e67f389a807fef186073ec1d8dccb34b270c78fb28e4595050fc7e3f +size 5579 diff --git a/data/2025/2504_09xxx/2504.09246/images/bc2c84cf62bc4f1b6b8be2c767b86dfdf0c9e4ef50d25a74033e23d5a5b1c1f5.jpg b/data/2025/2504_09xxx/2504.09246/images/bc2c84cf62bc4f1b6b8be2c767b86dfdf0c9e4ef50d25a74033e23d5a5b1c1f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c48c353e4a541120c38fcaa6d727376b86740d6a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/bc2c84cf62bc4f1b6b8be2c767b86dfdf0c9e4ef50d25a74033e23d5a5b1c1f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b18a26d39703741bf8a3d7d411f2fea5cded56a4d197253c7ada9cd0ac3dc33f +size 12932 diff --git a/data/2025/2504_09xxx/2504.09246/images/bf0afb47f1cfa0e9f681afec1348c6553d11edd6b8f05b746402a2fb18e6f6cd.jpg 
b/data/2025/2504_09xxx/2504.09246/images/bf0afb47f1cfa0e9f681afec1348c6553d11edd6b8f05b746402a2fb18e6f6cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b11db82082862353ed00986b5a7af1199f2c9df --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/bf0afb47f1cfa0e9f681afec1348c6553d11edd6b8f05b746402a2fb18e6f6cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d1b76678c25317f9a9260d4e15ff955a2bddbe2ea12787acb33224f74255929 +size 73044 diff --git a/data/2025/2504_09xxx/2504.09246/images/e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg b/data/2025/2504_09xxx/2504.09246/images/e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b3a44ac160bcf22ba53462bb572a67f53d96cf3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77a21c653cb02c62c79776b74a766ef855fdd6d15526948ce2cf0eb362abf122 +size 20803 diff --git a/data/2025/2504_09xxx/2504.09246/images/f073b7ee2da4caa55b5264bcf1f31d1350cf09e38048dbbafe09e355869400df.jpg b/data/2025/2504_09xxx/2504.09246/images/f073b7ee2da4caa55b5264bcf1f31d1350cf09e38048dbbafe09e355869400df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff11fe19cafb1eee1a5e42a79ccf5712f9b2dee4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/f073b7ee2da4caa55b5264bcf1f31d1350cf09e38048dbbafe09e355869400df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18d89096920ee1a034c7aa8e9f2ee773ebcff77bcbf8a455a3c98a95e489ec76 +size 9807 diff --git a/data/2025/2504_09xxx/2504.09246/images/f1f6047b803098218c7b4710fa33ac5b887e6008b7372f9b4df3091be5d49f88.jpg b/data/2025/2504_09xxx/2504.09246/images/f1f6047b803098218c7b4710fa33ac5b887e6008b7372f9b4df3091be5d49f88.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..3b696ee57964690195e4d95b5ec20463f6c9f589 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/f1f6047b803098218c7b4710fa33ac5b887e6008b7372f9b4df3091be5d49f88.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14adb05319776b2cc0afb64b3925fabf8040bbe6d07f472eb1047420f1ef96df +size 13335 diff --git a/data/2025/2504_09xxx/2504.09246/images/f44709204f1915dbf83dda816a61b48ceff15c92295db67e8bf0a7f417ce5aea.jpg b/data/2025/2504_09xxx/2504.09246/images/f44709204f1915dbf83dda816a61b48ceff15c92295db67e8bf0a7f417ce5aea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..853af38c77ba8293b404446bc7e4d6bc9cfcc4ea --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/images/f44709204f1915dbf83dda816a61b48ceff15c92295db67e8bf0a7f417ce5aea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d700b8229d1d344023fafd9b352affc86121f45d3c05010a1530f01ed1e01a45 +size 13672 diff --git a/data/2025/2504_09xxx/2504.09246/layout.json b/data/2025/2504_09xxx/2504.09246/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f6cde26869a75cb5deee09e664c1e5b3599b556e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09246/layout.json @@ -0,0 +1,30032 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 43, + 82, + 430, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 82, + 430, + 100 + ], + "spans": [ + { + "bbox": [ + 43, + 82, + 430, + 100 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 42, + 108, + 227, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 108, + 227, + 120 + ], + "spans": [ + { + "bbox": [ + 42, + 108, + 227, + 120 + ], + "type": "text", + "content": "NIELS MündLER*, ETH Zurich, Switzerland" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 122, + 188, + 134 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 122, + 188, + 134 + ], + "spans": [ + { + "bbox": [ + 43, + 122, + 188, + 134 + ], + "type": "text", + "content": "JINGXUAN HE*, UC Berkeley, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 136, + 174, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 136, + 174, + 146 + ], + "spans": [ + { + "bbox": [ + 44, + 136, + 174, + 146 + ], + "type": "text", + "content": "HAO WANG,UC Berkeley,USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 148, + 185, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 148, + 185, + 159 + ], + "spans": [ + { + "bbox": [ + 44, + 148, + 185, + 159 + ], + "type": "text", + "content": "KOUSHIK SEN, UC Berkeley, USA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 161, + 181, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 161, + 181, + 172 + ], + "spans": [ + { + "bbox": [ + 44, + 161, + 181, + 172 + ], + "type": "text", + "content": "DAWN SONG, UC Berkeley, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 174, + 225, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 174, + 225, + 185 + ], + "spans": [ + { + "bbox": [ + 44, + 174, + 225, + 185 + ], + "type": "text", + "content": "MARTIN VECHEV, ETH Zurich, Switzerland" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 192, + 441, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 192, + 441, + 345 + ], + "spans": [ + { + "bbox": [ + 42, + 192, + 441, + 345 + ], + "type": "text", + "content": "Large language models (LLMs) have achieved notable success in code generation. However, they still frequently produce uncompilable output because their next-token inference procedure does not model formal aspects of code. 
Although constrained decoding is a promising approach to alleviate this issue, it has only been applied to handle either domain-specific languages or syntactic features of general-purpose programming languages. However, LLMs frequently generate code with typing errors, which are beyond the domain of syntax and generally hard to adequately constrain. To address this challenge, we introduce a type-constrained decoding approach that leverages type systems to guide code generation. For this purpose, we develop novel prefix automata and a search over inhabitable types, forming a sound approach to enforce well-typedness on LLM-generated code. We formalize our approach on a foundational simply-typed language and extend it to TypeScript to demonstrate practicality. Our evaluation on the HumanEval and MBPP datasets shows that our approach reduces compilation errors by more than half and significantly increases functional correctness in code synthesis, translation, and repair tasks across LLMs of various sizes and model families, including state-of-the-art open-weight models with more than 30B parameters. The results demonstrate the generality and effectiveness of our approach in constraining LLM code generation with formal rules of type systems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 350, + 441, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 350, + 441, + 372 + ], + "spans": [ + { + "bbox": [ + 42, + 350, + 441, + 372 + ], + "type": "text", + "content": "CCS Concepts: • Theory of computation → Formal languages and automata theory; • Software and its engineering → General programming languages; • Computing methodologies → Machine learning." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 376, + 441, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 376, + 441, + 400 + ], + "spans": [ + { + "bbox": [ + 42, + 376, + 441, + 400 + ], + "type": "text", + "content": "Additional Key Words and Phrases: Code Generation, Language Model, Type System, Program Synthesis, Program Translation, Program Repair, Constrained Decoding" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 408, + 119, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 408, + 119, + 419 + ], + "spans": [ + { + "bbox": [ + 44, + 408, + 119, + 419 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 423, + 442, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 423, + 442, + 506 + ], + "spans": [ + { + "bbox": [ + 42, + 423, + 442, + 506 + ], + "type": "text", + "content": "Large language models (LLMs) are remarkably successful in diverse fields [12, 27, 49] and increasingly used in everyday coding tasks [25, 68]. They show promising capabilities at synthesizing code from natural language descriptions [37, 59], translating between programming languages [59], and repairing incorrect programs [44, 74]. Despite these achievements, LLM-generated code often contains compilation errors, logic flaws, or security vulnerabilities [20, 53, 55]. These issues arise because LLMs generate code by iteratively sampling the next token from a vocabulary of tokens – a probabilistic process that does not provide any formal guarantees." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 42, + 507, + 441, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 507, + 441, + 555 + ], + "spans": [ + { + "bbox": [ + 42, + 507, + 441, + 555 + ], + "type": "text", + "content": "A promising technique to address this limitation is constrained decoding, which enforces the formal rules of programming languages during LLMs' code generation process, rejecting invalid tokens and ensuring only valid tokens are considered as generation candidates. Previous studies have shown that constrained decoding improves adherence to program syntax [8, 41, 57, 66]." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 43, + 561, + 157, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 561, + 157, + 572 + ], + "spans": [ + { + "bbox": [ + 43, + 561, + 157, + 572 + ], + "type": "text", + "content": "*Both authors co-lead this project." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 42, + 579, + 441, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 579, + 441, + 619 + ], + "spans": [ + { + "bbox": [ + 42, + 579, + 441, + 619 + ], + "type": "text", + "content": "Authors' Contact Information: Niels Mündler, niels.muendler@inf.ethz.ch, ETH Zurich, Switzerland; Jingxuan He, jingxuan. he@berkeley.edu, UC Berkeley, USA; Hao Wang, hwang628@berkeley.edu, UC Berkeley, USA; Koushik Sen, ksen@berkeley. edu, UC Berkeley, USA; Dawn Song, dawnsong@berkeley.edu, UC Berkeley, USA; Martin Vechev, martin.vechev@inf.ethz.ch, ETH Zurich, Switzerland." 
+ } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 44, + 627, + 95, + 646 + ], + "blocks": [ + { + "bbox": [ + 44, + 627, + 95, + 646 + ], + "lines": [ + { + "bbox": [ + 44, + 627, + 95, + 646 + ], + "spans": [ + { + "bbox": [ + 44, + 627, + 95, + 646 + ], + "type": "image", + "image_path": "8791541988ff67791f7d7b874ab136d22e0e41969cd1283171572f806c51e768.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 15, + 136, + 37, + 483 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 15, + 136, + 37, + 483 + ], + "spans": [ + { + "bbox": [ + 15, + 136, + 37, + 483 + ], + "type": "text", + "content": "arXiv:2504.09246v2 [cs.LG] 8 May 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 43, + 648, + 366, + 658 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 648, + 366, + 658 + ], + "spans": [ + { + "bbox": [ + 43, + 648, + 366, + 658 + ], + "type": "text", + "content": "This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License." + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 442, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 442, + 121 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 442, + 121 + ], + "type": "text", + "content": "However, these improvements are limited, as syntax accounts for only a small part of overall program correctness. For instance, in our evaluation of state-of-the-art open-weight LLMs (§5), syntactic errors make up on average " + }, + { + "bbox": [ + 42, + 85, + 442, + 121 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 42, + 85, + 442, + 121 + ], + "type": "text", + "content": " of all compilation errors in generated TypeScript code." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 129, + 442, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 129, + 442, + 201 + ], + "spans": [ + { + "bbox": [ + 42, + 129, + 442, + 201 + ], + "type": "text", + "content": "Key Challenge: Generating Well-Typed Code. Beyond program syntax, type systems detect and reject bugs at compile time [40, 43] and are therefore enforced in many popular programming languages [4, 10, 19]. We observe that LLMs struggle to generate well-typed code [20, 29, 63], as typing rules significantly complicate the generation of valid code [62]. In our evaluation of LLMs (§5), on average " + }, + { + "bbox": [ + 42, + 129, + 442, + 201 + ], + "type": "inline_equation", + "content": "94\\%" + }, + { + "bbox": [ + 42, + 129, + 442, + 201 + ], + "type": "text", + "content": " of compilation errors result from failing type checks. This suggests a promising direction: guiding LLMs' code generation process by incorporating the formal rules of type systems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 201, + 442, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 201, + 442, + 310 + ], + "spans": [ + { + "bbox": [ + 42, + 201, + 442, + 310 + ], + "type": "text", + "content": "However, implementing this approach is challenging because type systems can in general not be captured by context-free grammars [43], prohibiting the application of prior constrained decoding methods developed for program syntax [8, 66]. Furthermore, besides deriving and maintaining a type environment for completed expressions during generation (similar to classic type systems), we need to accurately assess and handle partial expressions. Specifically, for each currently generated partial expression, we must decide whether the partial expression can be completed to match a required type. 
Determining this would allow us to constrain the LLM to provably generate well-typed expressions upon termination, but involves solving the challenging problem of type inhabitation [30, 67] in the novel context of LLM-based code generation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 317, + 442, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 317, + 442, + 437 + ], + "spans": [ + { + "bbox": [ + 42, + 317, + 442, + 437 + ], + "type": "text", + "content": "This Work: Type-Constrained Decoding. In this work, we introduce type-constrained decoding1, addressing the challenge of generating well-typed code using LLMs. We develop a sound algorithm to determine if a partial program can be completed into a well-typed program. This algorithm is based on a novel non-deterministic automaton we construct. The automaton incrementally builds abstract syntax trees described by the partial program and annotates them with type-relevant context, e.g., declared identifiers and expression types. It leverages such information to maintain a prefix property, ensuring that parsing a program prefix only results in a non-empty set of states when it can be completed into a well-typed program. To guarantee the prefix property, we design a sound type search algorithm that determines whether a partial expression can inhabit a given type. We construct our automaton for a generic, simply-typed Turing-complete calculus [10]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "spans": [ + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "text", + "content": "To demonstrate its practical effectiveness, we instantiate our approach on a non-trivial subset of TypeScript. 
We choose TypeScript for three key reasons: (i) it is currently one of the most actively used languages, e.g., in open-source projects on GitHub [26, 38]; (ii) as we show, state-of-the-art LLMs fail to reliably generate well-typed TypeScript code; (iii) its core type system is simple enough [10] to be suitable for developing the first prototype of our approach. We perform a comprehensive evaluation on TypeScript versions of the widely-used HumanEval and MBPP benchmarks [5, 13, 14], focusing on three common coding tasks: synthesis, translation, and repair. Our experimental results show that type-constrained decoding significantly enhances code generation for LLMs of various sizes (2B-34B parameters). For synthesis and translation, it reduces compilation errors by more than half and increases functional correctness relatively by " + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "inline_equation", + "content": "3.5\\%" + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "text", + "content": ". Additionally, it enhances functionally correct repair of non-compiling code relatively by " + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "inline_equation", + "content": "37\\%" + }, + { + "bbox": [ + 42, + 437, + 442, + 581 + ], + "type": "text", + "content": " on average. We further investigate our approach in depth through a runtime analyses and case studies." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 581, + 442, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 581, + 442, + 629 + ], + "spans": [ + { + "bbox": [ + 42, + 581, + 442, + 629 + ], + "type": "text", + "content": "We highlight that our type constraining approach is broadly applicable to any language derivable from the core calculus, any code generation task in these languages, and any LLM utilizing next-token generation. In §6, we envision how our approach can benefit other production-ready languages and closed-weight LLMs." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 63, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 63, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 63, + 69 + ], + "type": "text", + "content": "171:2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 43, + 647, + 410, + 658 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 647, + 410, + 658 + ], + "spans": [ + { + "bbox": [ + 43, + 647, + 410, + 658 + ], + "type": "text", + "content": "1Our code implementation is publicly available at https://github.com/eth-sri/type-constrained-code-generation." + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 85, + 370, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 85, + 370, + 97 + ], + "spans": [ + { + "bbox": [ + 43, + 85, + 370, + 97 + ], + "type": "text", + "content": "Main Contributions. 
Our main contributions can be summarized as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 100, + 440, + 159 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 52, + 100, + 440, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 100, + 440, + 123 + ], + "spans": [ + { + "bbox": [ + 52, + 100, + 440, + 123 + ], + "type": "text", + "content": "- A prefix automaton and a type search algorithm to enable type constraining for LLM-based code generation, demonstrated on a generic, simply-typed core calculus (§3)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 123, + 425, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 123, + 425, + 135 + ], + "spans": [ + { + "bbox": [ + 52, + 123, + 425, + 135 + ], + "type": "text", + "content": "- An instantiation and extension of our approach to the popular TypeScript language (§4)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 136, + 440, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 136, + 440, + 159 + ], + "spans": [ + { + "bbox": [ + 52, + 136, + 440, + 159 + ], + "type": "text", + "content": "- An extensive evaluation across various LLMs and coding tasks, showing the significant benefit of our approach in reducing compilation errors and increasing functional correctness (§5)." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 43, + 168, + 184, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 168, + 184, + 180 + ], + "spans": [ + { + "bbox": [ + 43, + 168, + 184, + 180 + ], + "type": "text", + "content": "2 Background and Overview" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 182, + 441, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 182, + 441, + 218 + ], + "spans": [ + { + "bbox": [ + 42, + 182, + 441, + 218 + ], + "type": "text", + "content": "In this section, we first provide relevant background on LLM-based code generation and constrained decoding. Then, we motivate our type constraining approach using an illustrative example and present a high-level overview of its construction." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 228, + 396, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 228, + 396, + 240 + ], + "spans": [ + { + "bbox": [ + 42, + 228, + 396, + 240 + ], + "type": "text", + "content": "2.1 Background on LLM-based Code Generation and Constrained Decoding" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "spans": [ + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": "LLM-based Code Generation. LLMs generate code incrementally by sampling one token at a time in an iterative manner, as depicted in Algorithm 1 (without the blue highlights). A user prompt " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": " specifies a code generation task for a trained LLM. 
At Line 1, the output program " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": " is initialized to an empty string or a program prefix provided in " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": ", e.g., a function signature. At the beginning of each generation iteration (Line 3), the LLM takes as input a concatenation " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "x \\circ s" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": " of the prompt " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": " and the current partial program " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": ". It then predicts a probability distribution " + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 42, + 242, + 235, + 469 + ], + "type": "text", + "content": " over a fixed, finite set of tokens, the vocabulary, where each token may be a single Unicode character or a string of multiple characters. All common singleton characters are included in LLMs' vocabulary, ensuring that any standard program can be produced by concatenating tokens [60]. 
Next, based on distri" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 240, + 253, + 441, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 253, + 441, + 289 + ], + "spans": [ + { + "bbox": [ + 240, + 253, + 441, + 289 + ], + "type": "text", + "content": "Algorithm 1 Vanilla LLM-based code generation (without the blue highlights) vs. constrained decoding (with the blue highlights)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "spans": [ + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "text", + "content": "Input: LLM, prompt " + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "text", + "content": ", completion engine " + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "inline_equation", + "content": "CE_L" + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "text", + "content": " for language " + }, + { + "bbox": [ + 240, + 293, + 440, + 316 + ], + "type": "inline_equation", + "content": "L" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "spans": [ + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "type": "text", + "content": "Output: Program " + }, + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 241, + 317, + 387, + 329 + ], + "type": "inline_equation", + "content": "s \\in L" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 244, + 329, + 438, + 458 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 246, + 329, + 302, + 339 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 329, + 302, + 339 + ], + "spans": [ + { + "bbox": [ + 246, + 329, + 302, + 339 + ], + "type": "text", + "content": "1: initialize s" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 247, + 341, + 320, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 341, + 320, + 351 + ], + "spans": [ + { + "bbox": [ + 247, + 341, + 320, + 351 + ], + "type": "text", + "content": "2: while true do" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 247, + 352, + 342, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 352, + 342, + 364 + ], + "spans": [ + { + "bbox": [ + 247, + 352, + 342, + 364 + ], + "type": "text", + "content": "3: " + }, + { + "bbox": [ + 247, + 352, + 342, + 364 + ], + "type": "inline_equation", + "content": "\\pmb{v} := \\mathrm{LLM}(x \\circ s)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 247, + 365, + 337, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 365, + 337, + 375 + ], + "spans": [ + { + "bbox": [ + 247, + 365, + 337, + 375 + ], + "type": "text", + "content": "4: while true do" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 247, + 378, + 316, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 378, + 316, + 387 + ], + "spans": [ + { + "bbox": [ + 247, + 378, + 316, + 387 + ], + "type": "text", + "content": "5: " + }, + { + "bbox": [ + 247, + 378, + 316, + 387 + ], + "type": "inline_equation", + "content": "t\\sim \\pmb{v}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 247, + 388, + 398, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 388, + 398, + 400 + ], + "spans": [ + { + "bbox": [ + 247, + 388, + 398, + 400 + ], + "type": "text", + "content": "6: if " + }, + { + "bbox": [ + 247, + 388, + 398, + 400 + ], + "type": "inline_equation", + "content": "CE_L(s \\circ t)" + }, + { + "bbox": [ + 
247, + 388, + 398, + 400 + ], + "type": "text", + "content": " then break" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "spans": [ + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "text", + "content": "7: elif " + }, + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "inline_equation", + "content": "t = EOS" + }, + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "inline_equation", + "content": "s \\in L" + }, + { + "bbox": [ + 244, + 401, + 438, + 411 + ], + "type": "text", + "content": " then break" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "spans": [ + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "type": "text", + "content": "8: else " + }, + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "type": "inline_equation", + "content": "\\pmb{v}[t] := 0" + }, + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "type": "text", + "content": "; normalize " + }, + { + "bbox": [ + 247, + 413, + 403, + 424 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 244, + 424, + 372, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 424, + 372, + 435 + ], + "spans": [ + { + "bbox": [ + 244, + 424, + 372, + 435 + ], + "type": "text", + "content": "9: if " + }, + { + "bbox": [ + 244, + 424, + 372, + 435 + ], + "type": "inline_equation", + "content": "t = EOS" + }, + { + "bbox": [ + 244, + 424, + 372, + 435 + ], + "type": "text", + "content": " then break" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 244, + 437, + 312, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 437, + 
312, + 447 + ], + "spans": [ + { + "bbox": [ + 244, + 437, + 312, + 447 + ], + "type": "text", + "content": "10: " + }, + { + "bbox": [ + 244, + 437, + 312, + 447 + ], + "type": "inline_equation", + "content": "s := s \\circ t" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 244, + 450, + 295, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 450, + 295, + 458 + ], + "spans": [ + { + "bbox": [ + 244, + 450, + 295, + 458 + ], + "type": "text", + "content": "11: return s" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "spans": [ + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "text", + "content": "bution " + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "text", + "content": ", a token " + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "text", + "content": " is sampled (Line 5) and appended to the program " + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 470, + 442, + 493 + ], + "type": "text", + "content": " (Line 10). This process is repeated until we encounter the special token EOS which signifies the end of the sequence (Line 9)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 42, + 493, + 441, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 493, + 441, + 554 + ], + "spans": [ + { + "bbox": [ + 42, + 493, + 441, + 554 + ], + "type": "text", + "content": "LLMs learn to predict adequate probability distributions from extensive training on natural and programming languages [12, 59, 73]. 
These distributions implicitly encode language rules, allowing LLMs to successfully solve code generation tasks [13, 28, 59]. However, LLMs may fail to infer complex rules [9, 21, 72], derive incomplete rules for less common languages [13, 51], and, due to the probabilistic nature of its generation procedure, not consistently follow formal language rules." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "spans": [ + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": "Constrained Decoding. The aforementioned shortcoming of LLMs can be mitigated by employing constrained decoding, which analyzes the intermediate model outputs " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " during the generation process and enforces that only valid tokens are incorporated. Specifically, constrained decoding leverages a completion engine " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "CE_{L}" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ", specific to a language " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ". 
Computing " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "CE_{L}(s)" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " returns whether partial program " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " can be completed to a well-formed program in " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ", meaning whether there exists a (possibly empty) string " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s \\circ s' \\in L" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ". 
Equivalently, " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "CE_{L}(s)" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " determines whether " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " belongs to the prefix language " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "L^{p}" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ", i.e., whether " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "s \\in L^{p}" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "inline_equation", + "content": "L^{p}" + }, + { + "bbox": [ + 42, + 559, + 442, + 644 + ], + "type": "text", + "content": " is formally defined as follows:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "spans": [ + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "text", + "content": "DEFINITION 1. 
For a given language " + }, + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "text", + "content": ", its prefix language is " + }, + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "inline_equation", + "content": "L^p \\coloneqq \\{s \\mid \\exists s': s \\circ s' \\in L\\}" + }, + { + "bbox": [ + 52, + 647, + 403, + 660 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 83, + 439, + 159 + ], + "blocks": [ + { + "bbox": [ + 47, + 83, + 439, + 159 + ], + "lines": [ + { + "bbox": [ + 47, + 83, + 439, + 159 + ], + "spans": [ + { + "bbox": [ + 47, + 83, + 439, + 159 + ], + "type": "table", + "html": "
function is_int(text: string): boolean {<completion>VanillaSyntaxTypesDescription
const num = Number(text);(1) ;acceptrejectrejectSyntactically invalid
return !isNaN(num) &&(2) beracceptacceptrejectUndeclared identifier
parseInt(num <completion>(3) ()acceptacceptrejectDisallowed operator
(4), 10)(5).string()acceptacceptrejectInvalid argument type
acceptacceptacceptWell-formed option
", + "image_path": "aed795b7a573ef796c6718dfe29dd3f1629d86f6914bb2b22785860942121b53.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 164, + 441, + 198 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 164, + 441, + 198 + ], + "spans": [ + { + "bbox": [ + 42, + 164, + 441, + 198 + ], + "type": "text", + "content": "Figure 1. Left is a partial TypeScript program derived from instance #113 of the MBPP benchmark [5], awaiting completion. Right are five completion options: (1)-(4) are invalid and (5) is well-formed. Our type-constrained decoding is the only approach capable of correctly rejecting invalid completions and accepting the valid one." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "spans": [ + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": "As illustrated in blue highlights of Algorithm 1, constrained decoding differs from vanilla LLM-based code generation by adding an additional sample-and-check loop at Line 4 around the token sampling process at Line 5. 
A sampled token " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " is considered further only if " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "s \\circ t" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " can be completed to a well-formed program (Line 6) or " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " is EOS and " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " is already well-formed in " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " (Line 7). Otherwise, the probability of " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " is set to zero at Line 8, and the sample-and-check loop repeats. 
Note that a token " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " satisfying either Line 6 or Line 7 always exists, because " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " is in " + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "inline_equation", + "content": "L^p" + }, + { + "bbox": [ + 42, + 216, + 442, + 323 + ], + "type": "text", + "content": " and LLMs' vocabulary contains all common characters. Therefore, the number of iterations of the loop at Line 4 is bounded by the fixed LLM vocabulary size. In practice, only few iterations are needed (\\$5.3) and do not require additional LLM inference, ensuring a reasonable runtime overhead compared to vanilla decoding." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "spans": [ + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": "The token-level guarantees extend inductively to guarantee the final program's validity with respect to " + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": ". At Line 1, we start with a valid prefix in " + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "inline_equation", + "content": "L^p" + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": ", i.e., either an empty string or a valid prefix provided in the user prompt. 
The check at Line 6 ensures that all intermediate outputs " + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": " are prefixes in " + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "inline_equation", + "content": "L^p" + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": ". Additionally, Line 7 and Line 9 ensure that the return statement in Line 11 is reached only if " + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "inline_equation", + "content": "s \\in L" + }, + { + "bbox": [ + 42, + 324, + 441, + 395 + ], + "type": "text", + "content": ". As an additional benefit, by steering previously ill-formed generations into well-formed ones, constrained decoding also increases the likelihood of generating functionally correct code." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "spans": [ + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": "Note that commonly used grammar and type checkers can not be used as a completion engine for constrained decoding. They judge whether a program string " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": " is well-formed according to the language " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": ", i.e., whether " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "s \\in L" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": ". 
When " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": " is not a complete program in " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": ", but a valid prefix in " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "L^p" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": ", they return a different output than " + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "inline_equation", + "content": "CE_L(s)" + }, + { + "bbox": [ + 42, + 396, + 442, + 443 + ], + "type": "text", + "content": ", which is not suitable for use in Algorithm 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 452, + 278, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 452, + 278, + 465 + ], + "spans": [ + { + "bbox": [ + 42, + 452, + 278, + 465 + ], + "type": "text", + "content": "2.2 Overview of Our Type Constraining Approach" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "spans": [ + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": "Inadequacy of Syntax-Only Constraining. 
To apply the constrained decoding algorithm described in §2, one needs to choose a language " + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": " and implement the completion engine " + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "inline_equation", + "content": "CE_{L}" + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": ". Recent work has explored defining " + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": " as the set of syntactically valid programs, thus leveraging the syntactic rules of programming languages for constrained decoding [8, 66, 71]. However, the benefits of this approach are limited, because syntax accounts for only a small portion of overall program correctness. For instance, across our evaluations (§5), only " + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "inline_equation", + "content": "3.5\\%" + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": " of the functional errors and " + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 42, + 468, + 442, + 550 + ], + "type": "text", + "content": " of the compilation errors in LLM-generated code are due to syntactic errors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 551, + 442, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 551, + 442, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 551, + 442, + 659 + ], + "type": "text", + "content": "We illustrate this limitation using the example in Figure 1. 
It presents five completion candidates for a partial program: (1)-(4) will lead to compilation errors and only (5) can result in a well-formed program. Based on syntax, completions that contain line terminations or invalid characters (e.g., $) could be rejected (1). However, many other cases, including (2)-(4), do not break syntactic rules but still cause compilation errors. For instance, candidate (2) results in accessing an undeclared identifier. In candidate (3), the function call operator will fail at execution time, as num is a number and can not be called. Candidate (4) passes a value of unexpected format to parseInt, which expects the first argument to be a string. In this example, (4) is generated by CodeLlama 34B [59]. Syntax-only constraining accepts this invalid completion, leading to a non-compilable final output." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "type": "text", + "content": "171:4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 442, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 442, + 192 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 442, + 192 + ], + "type": "text", + "content": "Our Approach: Leveraging the Type System. We require stronger constraints to effectively guide the model generation. 
Beyond syntax, type systems are commonly utilized in compilers, enforcing semantic rules to detect and reject bugs at compile time [23]. For Figure 1, the TypeScript type system would correctly reject code containing erroneous completions (2)-(4). Therefore, in this work, we propose leveraging type systems in constrained decoding to guide code generation. Our method accurately detects that only candidate (5) is a valid completion, guiding CodeLlama 34B to adopt this option and complete the program correctly. As detailed in §5, our experimental results demonstrate that our approach more than halves compiler errors in generated code and consistently increases the proportion of functionally correct programs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 193, + 442, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 193, + 442, + 288 + ], + "spans": [ + { + "bbox": [ + 42, + 193, + 442, + 288 + ], + "type": "text", + "content": "Incorporating typing rules into code generation offers substantial potential but presents a significant challenge. Previous research has focused primarily on constrained decoding for context-free languages, for which prefixes can be efficiently determined [8, 66, 71]. Type systems, however, require language specifications that exceed the capabilities of context-free grammars [43], inhibiting the direct application of prior techniques to type-constrained decoding. Moreover, determining whether a partially generated expression can be completed to be a well-typed full expression involves not only type checking and inference, as done in traditional compilers, but also addressing type inhabitation [39, 67]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 288, + 442, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 288, + 442, + 397 + ], + "spans": [ + { + "bbox": [ + 42, + 288, + 442, + 397 + ], + "type": "text", + "content": "To address these challenges, we design and implement a practical approach to determine whether a string can be completed to a well-typed program. We begin by developing a specialized kind of non-deterministic automaton that maintains a prefix property, formally defined in §3.2. This property ensures that every reachable state can lead to an accepting state. We leverage this property to build a completion engine for constrained decoding as in Algorithm 1. We construct such a completion engine to enforce well-typedness for a simply-typed language " + }, + { + "bbox": [ + 42, + 288, + 442, + 397 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 288, + 442, + 397 + ], + "type": "text", + "content": " in §3.3-§3.5 and extend it to a core subset of TypeScript in §4. At a high level, the automaton acts as a syntactic parser, additionally maintaining information about initialized variables, enclosing function declarations, and other type-related aspects of the partially parsed syntax tree. This is possible through dynamically created" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 397, + 267, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 397, + 267, + 407 + ], + "spans": [ + { + "bbox": [ + 42, + 397, + 267, + 407 + ], + "type": "text", + "content": "annotated states that track the additional information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 408, + 306, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 408, + 306, + 540 + ], + "spans": [ + { + "bbox": [ + 42, + 408, + 306, + 540 + ], + "type": "text", + "content": "In Figure 2, we provide a concrete example for our prefix automata. 
Every state represents the currently parsed syntactic component and additionally tracks the surrounding typing information. For example, after parsing the partial program in Figure 1, the automaton currently parses an expression as the first argument to function parseInt. Transitions are annotated with further code completions that are deemed admissible based on the syntax and typing information. In the first state, the automaton has parsed num, inferring from previous declarations that it represents an identifier of type number. Based on the signature of the parseInt function call, the required type of the completed" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 402, + 442, + 512 + ], + "blocks": [ + { + "bbox": [ + 313, + 402, + 442, + 512 + ], + "lines": [ + { + "bbox": [ + 313, + 402, + 442, + 512 + ], + "spans": [ + { + "bbox": [ + 313, + 402, + 442, + 512 + ], + "type": "image", + "image_path": "97d87110c0b553b842106c346a101d14eee820100979e854ea26b5cfa06cdac3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 514, + 442, + 535 + ], + "lines": [ + { + "bbox": [ + 311, + 514, + 442, + 535 + ], + "spans": [ + { + "bbox": [ + 311, + 514, + 442, + 535 + ], + "type": "text", + "content": "Figure 2. An example of a prefix automaton." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 540, + 443, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 540, + 443, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 540, + 443, + 660 + ], + "type": "text", + "content": "argument is string. The automaton now determines the admissible transitions from the identifier state. State transitions corresponding to completions (1)-(4) from Figure 1 are disallowed, as they are determined to violate type rules based on the tracked type information. 
Further, the automaton needs to determine which operations on the current expression num of type number can be applied to obtain an expression of type string. To achieve this, we develop a type reachability search algorithm, which finds string-typed expressions num.toString() and num.isFinite(). toString(). Therefore, it returns that accesses to members .ToString and .isFinite are admissible, resulting in the two depicted transitions with the corresponding labels. In our experiment, CodeLlama 34B chooses to transition along .ToString(), the more likely completion based on its training data. Note that in our actual automaton formalism, as described at the end of §3.2, state transitions are" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 441, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 441, + 109 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 441, + 109 + ], + "type": "text", + "content": "on a character level. Figure 2 condenses character-level transitions into string-level transitions for presentation purposes." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 109, + 307, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 109, + 307, + 252 + ], + "spans": [ + { + "bbox": [ + 42, + 109, + 307, + 252 + ], + "type": "text", + "content": "The type reachability algorithm seeks to identify sequences of operators applied to a given expression such that the resulting expression possesses a required type. Conceptually, it performs a search over an abstracted type graph, whose nodes are types, and edges represent well-typed operations connecting the input and output types. An example of such a (partial) graph is shown in Figure 3, with a valid path highlighted in green color. Starting from the derived number type of num, the search first traverses a member access edge to reach the nullary function type () => string. Then, it traverses an edge representing a function call to reach the goal type string, concluding that the combination of traversed operators . toString() is a well-formed" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 253, + 440, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 253, + 440, + 288 + ], + "spans": [ + { + "bbox": [ + 42, + 253, + 440, + 288 + ], + "type": "text", + "content": "completion for Figure 1. The path for num.isFinite().ToString() is analogously valid but omitted in Figure 3 for brevity. This type reachability search is invoked every time a partial expression is parsed, in order to determine valid transitions in the prefix automaton." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 312, + 110, + 442, + 222 + ], + "blocks": [ + { + "bbox": [ + 312, + 110, + 442, + 222 + ], + "lines": [ + { + "bbox": [ + 312, + 110, + 442, + 222 + ], + "spans": [ + { + "bbox": [ + 312, + 110, + 442, + 222 + ], + "type": "image", + "image_path": "7677ba40fa4dc888f7b11ccdde1711e919f778b9badf962eced9f4c57b32331b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 225, + 441, + 247 + ], + "lines": [ + { + "bbox": [ + 311, + 225, + 441, + 247 + ], + "spans": [ + { + "bbox": [ + 311, + 225, + 441, + 247 + ], + "type": "text", + "content": "Figure 3. An example of a partial type search graph." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 288, + 441, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 288, + 441, + 336 + ], + "spans": [ + { + "bbox": [ + 42, + 288, + 441, + 336 + ], + "type": "text", + "content": "We implement our approach for a significant subset of TypeScript (§4) and experimentally evaluate it for various LLMs and three important code generation tasks: synthesis, translation, and repair (§5). The results demonstrate that our approach provides significant benefits in both reducing compilation errors for LLM-generated code and increasing their functional correctness." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 345, + 212, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 345, + 212, + 357 + ], + "spans": [ + { + "bbox": [ + 42, + 345, + 212, + 357 + ], + "type": "text", + "content": "3 Our Type Constraining Approach" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "spans": [ + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "text", + "content": "In this section, we first present a generic, simply-typed language " + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "text", + "content": " (§3.1). Then, we present our type constraining approach using " + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "text", + "content": ". Specifically, we introduce our prefix automaton formalism (§3.2) and define increasingly complex automata for parsing well-typed fragments of " + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 360, + 441, + 420 + ], + "type": "text", + "content": ", beginning with identifiers, literals, and types (§3.3), continuing to expressions, including type search for type-restricted expressions (§3.4), and concluding with statements (§3.5)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 429, + 187, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 429, + 187, + 442 + ], + "spans": [ + { + "bbox": [ + 42, + 429, + 187, + 442 + ], + "type": "text", + "content": "3.1 A SimplyTyped Language" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 444, + 442, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 444, + 442, + 480 + ], + "spans": [ + { + "bbox": [ + 42, + 444, + 442, + 480 + ], + "type": "text", + "content": "We define a simply typed, Turing-complete language, " + }, + { + "bbox": [ + 42, + 444, + 442, + 480 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 444, + 442, + 480 + ], + "type": "text", + "content": ". Its grammar and type system are generic, resembling the principles found in popular statically typed languages, such as TypeScript, Java, and Go. However, there may be a slight bias towards TypeScript, as our implementation is based on it." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "spans": [ + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "text", + "content": "Syntax. The syntax of " + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "text", + "content": " is shown in Figure 4. The language includes expressions, type-annotated variable and function definitions, and control flows. Overall, it is based on a core subset of TypeScript [10] but can be adapted for other statically typed languages. Similar to Bierman et al. 
[10], we represent Kleene-Star repetitions using an overline, e.g., " + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "inline_equation", + "content": "\\overline{s}" + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "text", + "content": " represents a sequence of statements " + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 486, + 442, + 582 + ], + "type": "text", + "content": ", and adhere to the TypeScript documentation to annotate parameter types in function signatures with argument names [17]. We make a distinction between base and extension expressions. The latter applies operators to previous expressions, leading to more complex expressions. This differentiation is useful later in §3.4 for constructing the prefix automaton for parsing expressions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": "Expression Typing Rules. The typing rules for " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": "'s expressions are detailed in Figure 5. These rules form a subset of safeFTS, a type-safe portion of TypeScript described by Bierman et al. [10], allowing us to leverage their soundness results. 
The type rules for " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " use the standard concept of a type environment, denoted as " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": ", which is a collection of pairs " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "(x : T)" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " of identifiers " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " and types " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": ". We write " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash e : T" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " if the expression " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " has type " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " in the type environment " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": ". 
An expression " + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 587, + 443, + 660 + ], + "type": "text", + "content": " is considered valid if its type can be derived by applying the given typing rules." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 61, + 63, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 61, + 63, + 68 + ], + "spans": [ + { + "bbox": [ + 44, + 61, + 63, + 68 + ], + "type": "text", + "content": "171:6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 83, + 426, + 270 + ], + "blocks": [ + { + "bbox": [ + 47, + 83, + 426, + 270 + ], + "lines": [ + { + "bbox": [ + 47, + 83, + 426, + 270 + ], + "spans": [ + { + "bbox": [ + 47, + 83, + 426, + 270 + ], + "type": "table", + "html": "
l ::=Literalp ::= x : TTyped Identifier
\\d+Numeric Literal
"\\"w*”String LiteralT ::=Type
true | falseBoolean LiteralnumberNumeric Type
stringString Type
x ::= \\w+IdentifierbooleanBoolean Type
(¯p) => TFunction Type
e ::= e0 | e1Expression
e0 ::=Base Expressions ::=Statement
lLiterallet x : T;Variable Declaration
xIdentifiere;Expression Statement
(¯p) => eFunction Expressionreturn e;Return Statement
(e)Grouped Expression{¯s}Statement Block
e1 ::=Extension Expressionfunction x (¯p) : T {¯s}Function Definition
e ⊙ eBinary Operatorif (e) s else sIf-Then-Else Statement
e(¯e)Function Call
e.nMember AccessM ::=¯sProgram
", + "image_path": "638ff536adeb7add76a966ada67900a19a856f6068152936b40b9f10840cbc7e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 277, + 441, + 300 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 277, + 441, + 300 + ], + "spans": [ + { + "bbox": [ + 42, + 277, + 441, + 300 + ], + "type": "text", + "content": "Figure 4. The syntax of " + }, + { + "bbox": [ + 42, + 277, + 441, + 300 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 277, + 441, + 300 + ], + "type": "text", + "content": ". Expressions are categorized into base and extension expressions. The later extends a given expression with suffix operators to form more complicated expressions." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 42, + 321, + 440, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 321, + 440, + 368 + ], + "spans": [ + { + "bbox": [ + 42, + 321, + 440, + 368 + ], + "type": "text", + "content": "_literals are evaluated to their respective types (LIT - {NUM, STR, BOOL}). Identifiers " + }, + { + "bbox": [ + 42, + 321, + 440, + 368 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 321, + 440, + 368 + ], + "type": "text", + "content": " are evaluated based on the corresponding type in the type environment (IDENT). Anonymous functions are typed according to their annotated parameter types, with the return type determined by the returned expression (ANON). Grouping preserves the type of the inner expression (GROUP)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "spans": [ + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": "Binary operators have predefined signatures " + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "inline_equation", + "content": "S_{1} \\odot S_{2}: T" + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": ", such as number + number : number for addition and " + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "inline_equation", + "content": "T = T: T" + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": " for assignments. These signatures must be satisfied in well-typed expressions (op). Function calls require parameters to match the function signature (CALL). The type of member accesses " + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "inline_equation", + "content": "e.n" + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": " is determined using an auxiliary function LOOKUP(S, n), which fetches the type of member " + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": " for type " + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 369, + 442, + 429 + ], + "type": "text", + "content": ". An instantiation of LOOKUP for TypeScript is provided by Bierman et al. [10]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "spans": [ + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "content": "Statements and Type Environments. 
The typing rules for statements are presented in Figure 6. Type environments are modified by statements, in particular variable declarations and function definitions. We use the notation " + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "inline_equation", + "content": "\\Gamma_1 \\vdash s \\rightarrow \\Gamma_2" + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "content": " to indicate that after executing statement " + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "content": " in type environment " + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "inline_equation", + "content": "\\Gamma_1" + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "content": ", the new environment is " + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "inline_equation", + "content": "\\Gamma_2" + }, + { + "bbox": [ + 42, + 436, + 441, + 483 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 483, + 442, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 483, + 442, + 580 + ], + "spans": [ + { + "bbox": [ + 42, + 483, + 442, + 580 + ], + "type": "text", + "content": "Variable declarations introduce the identifier with declared type into the type environment, provided the identifier is not already defined (DECL). The type environment defines the context to evaluate expressions (EXPR) and return statements (RET). Return statements are only well-typed inside function bodies. The statements inside statement blocks and if-then-else statements must maintain valid type environments, but do not have an external effect (BLOCK, ITE). This also applies to function definitions; however, the defined function is finally added to the external type environment (FUN). 
Lastly, empty statements do not alter the type environment (NOP), while statement sequences propagate the type environment along the execution (SEQ)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "spans": [ + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "text", + "content": "Return Types. The rules for checking return types are presented in Figure 7. Firstly, return statements must contain expressions matching the function's declared return type. Secondly, such an expression must be returned on every execution path. We use the notation " + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash \\overline{s} : R" + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "text", + "content": " to indicate the sequence of statements " + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "inline_equation", + "content": "\\overline{s}" + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "text", + "content": " ensures a return value of type " + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 42, + 586, + 441, + 634 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": "For variable declarations and expression statements, the return type of the subsequent statements is considered (R-DECL, R-EXPR). 
The return type of a return statement directly corresponds to the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 240, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 240, + 69 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 240, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 422, + 60, + 441, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 60, + 441, + 68 + ], + "spans": [ + { + "bbox": [ + 422, + 60, + 441, + 68 + ], + "type": "text", + "content": "171:7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 86, + 446, + 105 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 86, + 446, + 105 + ], + "spans": [ + { + "bbox": [ + 44, + 86, + 446, + 105 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {L I T - N U M} \\right] \\frac {}{\\Gamma \\vdash \\backslash d + : n u m b e r} \\quad \\left[ \\mathrm {L I T - S T R} \\right] \\frac {}{\\Gamma \\vdash \" \\backslash w * \" : s t r i n g} \\quad \\left[ \\mathrm {L I T - B O O L} \\right] \\frac {}{\\Gamma \\vdash \\text {t r u e , f a l s e : b o o l e a n}}", + "image_path": "2fd0789c478629059061d1f04d2504d5c39529783b9438d88c243ed727ac7a60.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 111, + 439, + 139 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 111, + 439, + 139 + ], + "spans": [ + { + "bbox": [ + 52, + 111, + 439, + 139 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {I D E N T} \\right] \\frac {(x : T) \\in \\Gamma}{\\Gamma \\vdash x : T} \\quad \\left[ \\mathrm {A N O N} \\right] \\frac {\\Gamma \\cup \\overline {{p}} \\vdash e : T}{\\Gamma \\vdash (\\overline 
{{p}}) \\Rightarrow e : (\\overline {{p}}) \\Rightarrow T} \\quad \\left[ \\mathrm {C A L L} \\right] \\frac {\\Gamma \\vdash f : (\\overline {{x}} : \\overline {{S}}) \\Rightarrow T \\quad \\Gamma \\vdash \\overline {{e}} : \\overline {{S}}}{\\Gamma \\vdash f (\\overline {{e}}) : T}", + "image_path": "9d74ba92a9fcd3e1568ae4c53fd0457a08b4f8d70c4687357fd52cd36b559d6b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 145, + 446, + 170 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 145, + 446, + 170 + ], + "spans": [ + { + "bbox": [ + 44, + 145, + 446, + 170 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {G R O U P} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash (e) : T} \\quad \\left[ \\mathrm {O P} \\right] \\frac {\\Gamma \\vdash e _ {1} : S _ {1} \\quad \\Gamma \\vdash e _ {2} : S _ {2} \\quad S _ {1} \\odot S _ {2} : T}{\\Gamma \\vdash e _ {1} \\odot e _ {2} : T} \\quad \\left[ \\mathrm {M E M} \\right] \\frac {\\Gamma \\vdash e : S \\quad \\text {L O O K U P} (S , n) = T}{\\Gamma \\vdash e . n : T}", + "image_path": "f44709204f1915dbf83dda816a61b48ceff15c92295db67e8bf0a7f417ce5aea.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 160, + 178, + 323, + 190 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 178, + 323, + 190 + ], + "spans": [ + { + "bbox": [ + 160, + 178, + 323, + 190 + ], + "type": "text", + "content": "Figure 5. Typing rules for " + }, + { + "bbox": [ + 160, + 178, + 323, + 190 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 160, + 178, + 323, + 190 + ], + "type": "text", + "content": "'s expressions." 
+ } + ] + } + ], + "index": 5, + "type": "text" + }, + { + "bbox": [ + 44, + 227, + 445, + 252 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 227, + 445, + 252 + ], + "spans": [ + { + "bbox": [ + 44, + 227, + 445, + 252 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {D E C L} \\right] \\frac {x \\notin \\Gamma}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\rightarrow \\Gamma \\cup (x : T)} \\quad \\left[ \\mathrm {E X P R} \\right] \\frac {\\Gamma \\vdash e : T}{\\Gamma \\vdash e ; \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {R E T} \\right] \\frac {\\text {i n s i d e f u n c t i o n b o d y} \\quad \\Gamma \\vdash e : T}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\rightarrow \\Gamma}", + "image_path": "3d4a440dee5f7b23d9e7008f350194720276a69ffa55cf4cea128326c345decb.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 258, + 415, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 258, + 415, + 285 + ], + "spans": [ + { + "bbox": [ + 74, + 258, + 415, + 285 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {B L O C K} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\leftrightarrow \\Gamma_ {B}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\nrightarrow \\Gamma} \\quad \\left[ \\mathrm {F U N} \\right] \\frac {x \\notin \\Gamma \\qquad \\Gamma \\cup (x : (\\overline {{p}}) = > T) \\cup (\\overline {{p}}) \\vdash \\overline {{s _ {x}}} \\nrightarrow \\Gamma_ {x}}{\\Gamma_ {1} \\vdash \\text {f u n c t i o n} x (\\overline {{p}}) : T \\{\\overline {{s _ {x}}} \\} \\nrightarrow \\Gamma \\cup (x : (\\overline {{p}}) = > T)}", + "image_path": "17dc9774d0b942f13d1da1e6a3601e40b9e4be909052556ce334fd429a6d1c28.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 290, + 440, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 290, + 440, + 318 + ], + "spans": [ + { + 
"bbox": [ + 49, + 290, + 440, + 318 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {I T E} \\right] \\frac {\\Gamma \\vdash s _ {i f} \\rightarrow \\Gamma_ {i f} \\quad \\Gamma \\vdash s _ {e l s e} \\leftrightarrow \\Gamma_ {e l s e}}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\leftrightarrow \\Gamma} \\quad \\left[ \\mathrm {N O P} \\right] \\frac {}{\\Gamma \\vdash \\bullet \\rightarrow \\Gamma} \\quad \\left[ \\mathrm {S E Q} \\right] \\frac {\\Gamma_ {1} \\vdash \\bar {s} \\leftrightarrow \\Gamma_ {2} \\quad \\Gamma_ {2} \\vdash s \\leftrightarrow \\Gamma_ {3}}{\\Gamma_ {1} \\vdash \\bar {s} s \\leftrightarrow \\Gamma_ {3}}", + "image_path": "f1f6047b803098218c7b4710fa33ac5b887e6008b7372f9b4df3091be5d49f88.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 95, + 326, + 388, + 338 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 326, + 388, + 338 + ], + "spans": [ + { + "bbox": [ + 95, + 326, + 388, + 338 + ], + "type": "text", + "content": "Figure 6. Type environment extension rules for sequences of statements in " + }, + { + "bbox": [ + 95, + 326, + 388, + 338 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 95, + 326, + 388, + 338 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9, + "type": "text" + }, + { + "bbox": [ + 74, + 375, + 408, + 398 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 375, + 408, + 398 + ], + "spans": [ + { + "bbox": [ + 74, + 375, + 408, + 398 + ], + "type": "interline_equation", + "content": "\\left[ R - D E C L \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\operatorname {l e t} x : T ; \\bar {s} : R} \\quad \\left[ R - E X P R \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash e ; \\bar {s} : R} \\quad \\left[ R - R E T \\right] \\frac {\\Gamma \\vdash e : R}{\\Gamma \\vdash \\operatorname {r e t u r n} e ; \\bar {s} : R}", + "image_path": "813bece72a796c00b4947d62c4234cd08bdb1c79907a0935b2564c24da881c0e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 101, + 403, + 384, + 429 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 403, + 384, + 429 + ], + "spans": [ + { + "bbox": [ + 101, + 403, + 384, + 429 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {R - B L O C K - S E L F} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} : R \\quad \\Gamma \\vdash \\overline {{s}}}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R} \\quad \\left[ \\mathrm {R - B L O C K - N E X T} \\right] \\frac {\\Gamma \\vdash \\overline {{s _ {B}}} \\quad \\Gamma \\vdash \\overline {{s}} : R}{\\Gamma \\vdash \\{\\overline {{s _ {B}}} \\} \\overline {{s}} : R}", + "image_path": "f073b7ee2da4caa55b5264bcf1f31d1350cf09e38048dbbafe09e355869400df.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 434, + 379, + 461 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 379, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 379, + 461 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {R - F U N} \\right] \\frac {\\Gamma \\cup (x : (\\bar {p} \\Rightarrow R)) \\vdash \\bar {s} : 
R ^ {\\prime} \\quad \\Gamma \\cup (x : (\\bar {p}) \\Rightarrow R) \\cup (\\bar {p}) \\vdash \\bar {s _ {x}} : R}{\\Gamma \\vdash \\text {f u n c t i o n} x (\\bar {p}): R \\{\\bar {s _ {x}} \\} \\bar {s} : R ^ {\\prime}}", + "image_path": "9e6f17ee5a65303d2aa22c5b0747c5139f872d0dd3a0f6c386f946ab69f27555.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 467, + 417, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 467, + 417, + 496 + ], + "spans": [ + { + "bbox": [ + 67, + 467, + 417, + 496 + ], + "type": "interline_equation", + "content": "\\left[ \\mathrm {R - I T E - S E L F} \\right] \\frac {\\Gamma \\vdash s _ {i f} : R \\quad \\Gamma \\vdash s _ {e l s e} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R} \\quad \\left[ \\mathrm {R - I T E - N E X T} \\right] \\frac {\\Gamma \\vdash \\bar {s} : R}{\\Gamma \\vdash \\mathrm {i f} (e) s _ {i f} \\mathrm {e l s e} s _ {e l s e} \\bar {s} : R}", + "image_path": "bc2c84cf62bc4f1b6b8be2c767b86dfdf0c9e4ef50d25a74033e23d5a5b1c1f5.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 153, + 504, + 331, + 515 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 504, + 331, + 515 + ], + "spans": [ + { + "bbox": [ + 153, + 504, + 331, + 515 + ], + "type": "text", + "content": "Figure 7. " + }, + { + "bbox": [ + 153, + 504, + 331, + 515 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 153, + 504, + 331, + 515 + ], + "type": "text", + "content": "'s typing rules for function returns." + } + ] + } + ], + "index": 14, + "type": "text" + }, + { + "bbox": [ + 42, + 550, + 442, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 550, + 442, + 623 + ], + "spans": [ + { + "bbox": [ + 42, + 550, + 442, + 623 + ], + "type": "text", + "content": "type of the returned expression (R-RET). 
For statement blocks, the return type is decided by either the block itself or the subsequent statements (R-BLOCK-SELF, R-BLOCK-NEXT). In function definitions, the return type is determined by the type of the subsequent statements, similar to expression statements. It is additionally required that the function body returns a type matching the declared return type (R-FUN). For if-then-else statements, both branches must return the same type (R-ITE-SELF), or the return type is determined by the following statements (R-ITE-NEXT)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "spans": [ + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "content": "Language Definition. In summary, a program " + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "content": " is in language " + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "content": " if both (i) " + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "content": " conform to the grammar in Figure 4 and (ii) " + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 633, + 441, + 658 + ], + "type": "text", + "content": " is well-typed according to the typing rules in Figures 5-7." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 61, + 63, + 69 + ], + "type": "text", + "content": "171:8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 70 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 70 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 85, + 199, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 85, + 199, + 97 + ], + "spans": [ + { + "bbox": [ + 43, + 85, + 199, + 97 + ], + "type": "text", + "content": "3.2 Prefix Automaton Definition" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "spans": [ + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "text", + "content": "We introduce a general definition of prefix automata, beginning with basic automata concepts. Prefix automata are standard automata that ensure a special prefix property2. This property enables us to use a prefix automaton to decide whether some string is in the prefix language " + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "inline_equation", + "content": "L^p" + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "text", + "content": " of a given language " + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "text", + "content": ". 
That is, the prefix automaton can function as a completion engine " + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "inline_equation", + "content": "CE_L" + }, + { + "bbox": [ + 42, + 100, + 442, + 160 + ], + "type": "text", + "content": " to facilitate constrained decoding, as described in §2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "spans": [ + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": "We consider an automaton " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "A \\coloneqq \\langle \\Sigma, Q, \\delta, I, F \\rangle" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", a tuple of the five following elements: (i) " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is an alphabet of input symbols; (ii) " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is a set of states; (iii) " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\delta : Q \\times \\Sigma \\mapsto \\mathcal{P}(Q)" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is a computable transition function that maps a state and an input symbol to a finite set of next states; (iv) " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "I \\subseteq Q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is a finite set of initial states; and (v) " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + 
"content": "F \\subseteq Q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is a decidable set of accepting states. As a convention, we denote a symbol in " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", a string of symbols in " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\Sigma^*" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", the empty string as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\varepsilon" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " and an operator for concatenating symbols and strings as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ". The transition function " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " maps a given state to all possible subsequent states. 
When " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is applied on a set of states " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{q} \\subseteq Q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", we take the union of the results as output, i.e., " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\delta(\\mathbf{q}, c) \\coloneqq \\bigcup_{q \\in \\mathbf{q}} \\delta(q, c)" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ". The transition function defines a directed graph " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", where every state is a node and there is an edge annotated with " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "q'" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "q' \\in \\delta(q, c)" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + 
"content": ". The language parsed by " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " comprises all strings " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " such that traversing " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " from some initial state in " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " along the edges annotated with " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "c_1 \\circ c_2 \\circ \\ldots \\circ c_n = s" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ", it is possible to reach some accepting state in " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ". 
Formally, we define recursively a traversal function " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " for states " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\gamma(\\mathbf{q}, \\varepsilon) \\coloneqq \\mathbf{q}" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "\\gamma(\\mathbf{q}, s \\circ c) \\coloneqq \\delta(\\gamma(\\mathbf{q}, s), c)" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ". The language accepted by " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": " is then defined as " + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "inline_equation", + "content": "L(A) \\coloneqq \\{s \\mid \\gamma(I, s) \\cap F \\neq \\emptyset\\}" + }, + { + "bbox": [ + 42, + 159, + 442, + 328 + ], + "type": "text", + "content": ". 
The traversal function has two intuitive properties concerning reachability that can be shown inductively:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 329, + 441, + 354 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 47, + 329, + 383, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 329, + 383, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 329, + 383, + 342 + ], + "type": "text", + "content": "(P1) A path along the graph can be split arbitrarily, i.e., " + }, + { + "bbox": [ + 47, + 329, + 383, + 342 + ], + "type": "inline_equation", + "content": "\\gamma (\\mathbf{q},s\\circ s^{\\prime}) = \\gamma (\\gamma (\\mathbf{q},s),s^{\\prime})" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "text", + "content": "(P2) If a state is reached by " + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "inline_equation", + "content": "s \\circ s'" + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "text", + "content": ", some state is reachable by " + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "inline_equation", + "content": "\\gamma(\\mathbf{q}, s \\circ s') \\neq \\emptyset \\Rightarrow \\gamma(\\mathbf{q}, s) \\neq \\emptyset" + }, + { + "bbox": [ + 46, + 342, + 441, + 354 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 355, + 442, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 355, + 442, + 380 + ], + "spans": [ + { + "bbox": [ + 42, + 355, + 442, + 380 + ], + "type": "text", + "content": "An automaton satisfies the prefix property or is a prefix automaton, if there is a path from every reachable state to some accepting state, or formally:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "spans": [ + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "text", + "content": "DEFINITION 2. For an automaton " + }, + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "text", + "content": ", the prefix property holds iff " + }, + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "inline_equation", + "content": "\\forall q \\in \\gamma(I, s) : \\exists s' : \\gamma(q, s') \\cap F \\neq \\emptyset" + }, + { + "bbox": [ + 43, + 385, + 442, + 411 + ], + "type": "text", + "content": ". The automaton is a prefix automaton if it satisfies the prefix property." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "spans": [ + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": "Intuitively, for such automata, reaching some state by consuming string " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": " implies that " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": " is a prefix to some member of " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": ". We define the reachable language of " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": ", all inputs that result in some state, as " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "L_r(A) := \\{s \\mid \\gamma(I, s) \\neq \\emptyset\\}" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": ". 
Below, we establish the equivalence of " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "L_r(A)" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "L(A)^p" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": ", the prefix language of " + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 415, + 442, + 463 + ], + "type": "text", + "content": " as defined in Definition 1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "spans": [ + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "text", + "content": "LEMMA 1. If " + }, + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "text", + "content": " is a prefix automaton, then " + }, + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "inline_equation", + "content": "L(A)^p = L_r(A)" + }, + { + "bbox": [ + 52, + 469, + 287, + 482 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "spans": [ + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": "Proof. 
For any " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\in L(A)^p" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": " there exists " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\circ s' \\in L(A)" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ", by the definition of prefix languages. By the definition of " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ", this implies " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "\\gamma(I, s \\circ s') \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ". Then, using (P2), we further derive " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "\\gamma(I, s) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\in L_r(A)" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ". Therefore, " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "L(A)^p \\subseteq L_r(A)" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": " holds. The other direction also holds. 
We first see that " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\in L_r(A) \\implies \\gamma(I, s) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ". Then applying Definition 2 and (P1), we find " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "\\exists s': \\gamma(I, s \\circ s') \\cap F \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": ", implying " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\circ s' \\in L(A)" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": " and thus " + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "inline_equation", + "content": "s \\in L(A)^p" + }, + { + "bbox": [ + 42, + 487, + 443, + 547 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 42, + 552, + 442, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 552, + 442, + 578 + ], + "spans": [ + { + "bbox": [ + 42, + 552, + 442, + 578 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 42, + 552, + 442, + 578 + ], + "type": "inline_equation", + "content": "L(A)^P \\subseteq L_r(A)" + }, + { + "bbox": [ + 42, + 552, + 442, + 578 + ], + "type": "text", + "content": " holds generally for automata, since the first half of the proof does not require the prefix property." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "spans": [ + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": "From Prefix Automata to Completion Engines. 
With Lemma 1, given a prefix automaton " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ", we can define a convenient-to-compute completion engine for the underlying language " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "CE_{L(A)}(s) \\coloneqq \\gamma(I, s) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ". Since our target language is " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": " and not " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ", we now need to determine the relationship between " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L(A)" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ". 
If we construct " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": " such that it parses a subset of " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L(A) \\subseteq L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ", we are guaranteed that all LLM generations constrained by " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "CE_{L(A)}" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": " lie in " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": ". 
Conversely, if " + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "inline_equation", + "content": "L(A) \\supseteq L" + }, + { + "bbox": [ + 42, + 582, + 443, + 644 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 422, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:9" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 43, + 648, + 396, + 660 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 648, + 396, + 660 + ], + "spans": [ + { + "bbox": [ + 43, + 648, + 396, + 660 + ], + "type": "text", + "content": "Note that the prefix property defined in our work differs from the one discussed in classical texts, e.g., [31]" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": "we are guaranteed that every string in " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " can be expressed under constrained decoding, but not that every generation is valid. 
For example, if " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " permits all syntactically correct programs, it guarantees that all well-typed programs can be generated, but permits ill-typed programs as well. Therefore, " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "L(A) \\subseteq L" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " is required to achieve our goal of enforcing well-typedness on LLM-generated code. Ideally, " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " would parse " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " exactly, i.e., " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "L(A) = L" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": ", which in our setting additionally guarantees that every well-typed program can be expressed under the constraints of the completion engine. If this is not achieved, it is important for " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " to capture a large subset of " + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 42, + 84, + 442, + 169 + ], + "type": "text", + "content": " to be practically useful." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "spans": [ + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "content": "Building a Prefix Automaton for " + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "content": ": Warming up. In the next sections, we will construct a prefix automaton for soundly parsing well-typed programs in " + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "content": ", by presenting various prefix automata for well-typed fragments of " + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "content": ". Our final automaton will cover a significant but incomplete subset of " + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 176, + 441, + 259 + ], + "type": "text", + "content": ". Incompleteness exists because to ensure that our algorithms terminate, we do not cover high-order types that are less likely to occur in practice. This is discussed in more detail in §3.4. Our evaluation in §5 empirically demonstrates that our approach sufficiently covers practical use cases to significantly improve the correctness of LLM-generated code." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "spans": [ + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "text", + "content": "We choose " + }, + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "text", + "content": " to be the set of Unicode characters. This makes our completion engine agnostic to LLM vocabularies. Even though LLMs' vocabularies differ, their tokens are always a string of single or multiple characters. When our completion engine for " + }, + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 260, + 441, + 307 + ], + "type": "text", + "content": " is called during constrained decoding, i.e., at Line 6 of Algorithm 1, it processes the sampled token character by character." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 308, + 440, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 308, + 440, + 344 + ], + "spans": [ + { + "bbox": [ + 42, + 308, + 440, + 344 + ], + "type": "text", + "content": "Before proceeding, we briefly introduce several base prefix automata below, with their precise definitions detailed in Appendix A.1. These automata are later combined, with parts of the transition function being overwritten, to construct more complex automata that capture elements of " + }, + { + "bbox": [ + 42, + 308, + 440, + 344 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 308, + 440, + 344 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 347, + 439, + 442 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "spans": [ + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "content": "- Union " + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "inline_equation", + "content": "A_X \\cup A_Y" + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "content": " parses the language " + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "inline_equation", + "content": "\\{s \\mid s \\in L(A_X) \\cup L(A_Y)\\}" + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "content": ". It is a prefix automaton if both " + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "inline_equation", + "content": "A_X" + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "inline_equation", + "content": "A_Y" + }, + { + "bbox": [ + 52, + 347, + 439, + 370 + ], + "type": "text", + "content": " are prefix automata." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "spans": [ + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": "- Concatenation " + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": " parses the language " + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "inline_equation", + "content": "\\{s \\circ s' \\mid s \\in L(A_X), s' \\in L(A_Y)\\}" + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": ". 
It is a prefix automaton if " + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "inline_equation", + "content": "A_X" + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "inline_equation", + "content": "A_Y" + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": " are both prefix automata, and " + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "inline_equation", + "content": "L(A_Y) \\neq \\emptyset" + }, + { + "bbox": [ + 52, + 371, + 439, + 393 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "spans": [ + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "text", + "content": "- Kleene-Star " + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "inline_equation", + "content": "A_{\\overline{X}}" + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "text", + "content": " parses the language " + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "inline_equation", + "content": "\\{\\overline{s} \\mid s \\in L(A_X)\\}" + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "text", + "content": ". It is a prefix automaton if " + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "inline_equation", + "content": "A_X" + }, + { + "bbox": [ + 52, + 395, + 439, + 417 + ], + "type": "text", + "content": " is a prefix automaton." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "spans": [ + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "text", + "content": "- Terminal " + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "inline_equation", + "content": "A_{\\mathsf{S}}" + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "text", + "content": " parses the language " + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "inline_equation", + "content": "\\{\\mathsf{S}\\}" + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "inline_equation", + "content": "\\mathsf{S}" + }, + { + "bbox": [ + 52, + 419, + 367, + 430 + ], + "type": "text", + "content": " is a fixed, non-empty string." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "spans": [ + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "text", + "content": "- Empty " + }, + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "inline_equation", + "content": "A_{\\emptyset}" + }, + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "text", + "content": " parses the empty language " + }, + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "inline_equation", + "content": "\\varnothing" + }, + { + "bbox": [ + 52, + 432, + 366, + 442 + ], + "type": "text", + "content": " and is always a prefix automaton." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 453, + 298, + 466 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 453, + 298, + 466 + ], + "spans": [ + { + "bbox": [ + 42, + 453, + 298, + 466 + ], + "type": "text", + "content": "3.3 Prefix Automata for Identifiers, Literals, and Types" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "spans": [ + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "text", + "content": "We now introduce prefix automata for basic syntactic elements of " + }, + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "text", + "content": ": identifiers, literals, and type annotations. The languages parsed by these automata exactly match their counterparts in " + }, + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 468, + 440, + 493 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "spans": [ + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "text", + "content": "**Literals.** The prefix automaton for literals " + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "inline_equation", + "content": "A_{I} \\coloneqq A_{\\mathrm{NUM}} \\cup A_{\\mathrm{STR}} \\cup A_{\\mathrm{BOOL}}" + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "text", + "content": " accepts number, string, and boolean literals as defined in Figure 4. 
The automata " + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{NUM}}, A_{\\mathrm{STR}}" + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{BOOL}}" + }, + { + "bbox": [ + 42, + 500, + 441, + 560 + ], + "type": "text", + "content": " are defined by the deterministic finite automaton representation of the corresponding regular expression of the literal. To ensure the prefix property on the finite automata of the regular expression, we prune states from which accepting states can not be reached." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "spans": [ + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "content": "Identifiers. During parsing, we maintain the current type environment " + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "content": ", as detailed in §3.5. We define the identifier automaton " + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "inline_equation", + "content": "A_{x}" + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "content": " as the union of the terminal automata for identifiers defined in " + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "content": ". In other words, " + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "inline_equation", + "content": "A_{x} := \\bigcup_{y \\in \\Gamma} A_{y}" + }, + { + "bbox": [ + 42, + 567, + 441, + 604 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": "Types. The type automaton " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{T}" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": " accepts type annotations as defined in the grammar of " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": " (Figure 4). It is defined as " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{T} := A_{\\mathrm{TYPE - LIT}} \\cup A_{\\mathrm{TYPE - FUN}}" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": ". This includes type literal automaton " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{TYPE - LIT}} := A_{\\mathrm{string}} \\cup A_{\\mathrm{number}} \\cup A_{\\mathrm{boolean}}" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": " and function type automaton " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{TYPE - FUN}} := A_{(\\overline{p})} \\Rightarrow T" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": ". The latter is a concatenation of multiple prefix automata, with the parameter and return types recursing on " + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{T}" + }, + { + "bbox": [ + 42, + 610, + 442, + 659 + ], + "type": "text", + "content": ". 
This recursive" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 84, + 440, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 84, + 440, + 109 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 440, + 109 + ], + "type": "text", + "content": "definition is valid, since it ensures a finite set of initial states, defines a decidable accepting set, and preserves the prefix property." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 119, + 222, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 119, + 222, + 131 + ], + "spans": [ + { + "bbox": [ + 43, + 119, + 222, + 131 + ], + "type": "text", + "content": "3.4 Prefix Automaton for Expressions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "spans": [ + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": "We introduce prefix automata to parse well-typed expressions in " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": ". 
We begin by describing an automaton " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": " to parse expressions whose types are unrestricted, e.g., any expression " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": " in an expression statement " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": ";. Then, we present an automaton " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "A_{e} \\downarrow T" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": " for expressions whose type is constrained to " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": ", e.g., for parameters of function calls. The type-constrained version accepts a string only if the inhabited type of the represented expression matches " + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 134, + 441, + 242 + ], + "type": "text", + "content": ". To preserve the prefix property, we need to ensure that partial expressions can be completed to inhabit the constrained type. Completions may involve arbitrarily many applications of operators, which may modify the expression type. We therefore introduce a type search algorithm that soundly determines which types an expression can inhabit, and use it to prune transitions that violate the prefix property." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 249, + 442, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 249, + 442, + 296 + ], + "spans": [ + { + "bbox": [ + 42, + 249, + 442, + 296 + ], + "type": "text", + "content": "Unrestricted Expressions. To handle the recursive syntactic structure of expressions, we differentiate two kinds as shown in Figure 4: base expressions, including identifiers, literals, grouped expressions, and anonymous functions, and extension expressions, which are operator applications (binary operator, member access, or function call) that lead to extending a given expression." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "spans": [ + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": "The expression automaton " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": " is thus defined as the union of base expression automata " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{x}, A_{l}, A_{(e)}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{(\\overline{p})} \\Rightarrow e" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": ", with potential extensions " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{\\odot e}, A_{.n}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{(\\overline{e})}" + }, + { + "bbox": [ + 42, + 
297, + 442, + 416 + ], + "type": "text", + "content": ". The individual base and extension automata are constructed by concatenating the respective terminal automata and recursively " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": ". Additionally, we restrict the type of the recursive " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": " if the restriction is required by the type system, e.g., for parsing call parameters with a fixed type. We provide additional details on this restriction in Appendix A.2. Since an expression can end after either base or extensions, accepting states of both base and extending automata are accepting states of " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": ". 
To implement extensions, we start from the base expression automata and recursively adjust " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": "'s transition function " + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "inline_equation", + "content": "\\delta_{e}" + }, + { + "bbox": [ + 42, + 297, + 442, + 416 + ], + "type": "text", + "content": " by adding outgoing edges from the accepting states of the current automaton to the initial states of the extending automata, or formally:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 421, + 404, + 454 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 421, + 404, + 454 + ], + "spans": [ + { + "bbox": [ + 77, + 421, + 404, + 454 + ], + "type": "interline_equation", + "content": "\\forall X, Y: \\delta_ {e} (q _ {Y} ^ {X}, c) := \\left\\{ \\begin{array}{l l} \\delta_ {Y} (q _ {Y} ^ {X}, c) \\cup \\delta_ {e} (I _ {(\\overline {{e}})} ^ {X}, c) \\cup \\delta_ {e} (I _ {\\odot e} ^ {X}, c) \\cup \\delta_ {e} (I _ {. 
n} ^ {X}, c) & \\text {i f q _ {Y} ^ {X} \\in F _ {Y}} \\\\ \\delta_ {Y} (q _ {Y} ^ {X}, c) & \\text {o t h e r w i s e ,} \\end{array} \\right.", + "image_path": "5a33822d9ca45c5300fff9da50bd03a045de3900a76ddac386ac07c04f35eed1.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "spans": [ + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": "where the labels " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " for a state " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "q_{Y}^{X}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " represent that a string " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " has been parsed, and currently the active automaton is " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": ", which can be one of the following: " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "A_{x}, A_{l}, A_{(e)}, A_{(\\overline{p})} \\Rightarrow e, A_{\\odot e}, A_{.n}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "A_{(\\overline{e})}" + }, + { + "bbox": [ + 42, + 
460, + 441, + 591 + ], + "type": "text", + "content": ". The superscripts are useful for tracking the currently expressed type, enabling us to determine the validity of extensions and transition to type-restricted expressions based on " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": "'s typing rules. For instance, for state " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "q^{42}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": ", the addition operator extension " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "+e" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " and function call extension " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "(\\overline{e})" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " are syntactically applicable to 42 of type number. While the addition operator with type signature number + number :number is allowed, we can not apply a function call on number. In general, we set " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "I_{Y}^{X} := \\emptyset" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " is an invalid extension to " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": ". 
Moreover, for the extension " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "+e" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " to be valid, " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " must be of type number. To this end, we transition to a type-restricted expression automaton by setting " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "I_{+e}^{42}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " to the set of initial states for " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "A_{+} \\circ (A_{e} \\downarrow \\text{number})" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": ". Similar to the recursive type automaton, our definition of " + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 460, + 441, + 591 + ], + "type": "text", + "content": " ensures a finite set of initial states and a decidable accepting set." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": "Type-Constrained Expressions. 
To implement " + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "inline_equation", + "content": "A_{e} \\downarrow T" + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": ", we must determine whether a partial expression " + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": " can be completed to inhabit type " + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": ". Completing " + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": " without any extension can lead to a possible set of types and repeated extensions can further alter the result type, but we are not guaranteed that the desired type can be reached. Moreover, extensions can be applied indefinitely, prohibiting an exhaustive search of possible completions." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "text", + "content": "171:11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": "We therefore develop a two-tiered algorithm, which we describe in the following paragraphs. This algorithm first identifies the derivable types DERIVABLE " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "(q_{s})" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " based on its current state " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "q_{s}" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": ". 
DERIVABLE " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "(q_{s})" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " refers to the set of inhabitable types for all possible expressions completed from " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " without extension. Second, a type reachability search REACHABLE " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "(\\text{DERIVABLE}(q_{e}), T)" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " is performed to determine if " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": " can be inhabited by extending from the derivable types of " + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 84, + 442, + 146 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "spans": [ + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "text", + "content": "We prune automaton transitions when this type search returns a negative result. To ensure the prefix property, the performed search is sound, i.e., it only returns a positive result if " + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "text", + "content": " can be expressed by a valid sequence of extensions. 
This also aligns with our goal of generating only well-typed programs, ensuring that our expression automata accept a subset of all well-typed expressions of " + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "text", + "content": ". To ensure termination, the search is incomplete, i.e., there may be a valid sequence of transitions to express " + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 145, + 442, + 254 + ], + "type": "text", + "content": " which is not found by the search and we may end up disallowing generation of a well-typed expression. However, it only avoids traversing types of high complexity that are less likely to occur in practice. We further empirically ensure that our approach is practically effective (§5)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "spans": [ + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": "Derivable Types. For the first part of the algorithm, we determine all types inhabitable by the currently parsed expression " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": " without extension, i.e., " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{DERIVABLE}(q_s)" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ". 
For example, while parsing partial identifier " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": " in the type environment " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\Gamma := \\{(x : \\text{number}), (xy : \\text{string})\\}" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{DERIVABLE}(q_x) = \\{\\text{number}, \\text{string}\\}" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{DERIVABLE}(q_{xy}) = \\{\\text{string}\\}" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ". For a final state " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": " of expression " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\mathrm{DERIVABLE}(q) := T" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash e : T" + }, + { + "bbox": [ + 44, + 258, + 208, + 413 + ], + "type": "text", + "content": ". 
Different expressions impose different" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 413, + 442, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 413, + 442, + 462 + ], + "spans": [ + { + "bbox": [ + 42, + 413, + 442, + 462 + ], + "type": "text", + "content": "rules on derivability, and we present the detailed rules in Table 1. Note that for grouped expressions and function literals, we need to enumerate reachable types by recursively contained expressions. To avoid explicitly enumerating all reachable types, we integrate the derivability and reachability algorithms. This optimization is discussed in more detail in Appendix A.4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "spans": [ + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": "LEMMA 2. For state " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "q \\in \\gamma(I_e, s)" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": " of partial expression " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": ", DERIVABLE(" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": ") returns all " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": " s.t. 
exists some suffix " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash s \\circ s': T" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 43, + 468, + 442, + 493 + ], + "type": "text", + "content": " does not involve an extension (operator, call, or member access)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 498, + 347, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 498, + 347, + 511 + ], + "spans": [ + { + "bbox": [ + 52, + 498, + 347, + 511 + ], + "type": "text", + "content": "PROOF. By case distinction on the possible states of partial expressions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": "Type Reachability. To determine which types are inhabitable by extending a base expression " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " of a given type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " (with binary operator, function call, or member access), we analyze sequences of single extension steps with compatible signatures. 
This process is conceptualized as a search over a graph where types are nodes and extension steps are edges. For every binary operator " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " with the signature " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "T \\odot X : S" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": ", an edge is created from type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " to type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": ". As an example, the operator for numerical addition " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "+" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " has the signature number " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "+" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " number: number, thereby forming an edge from number to itself. 
Furthermore, for every member " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " of type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": ", we create an edge from " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "\\text{LOOKUP}(T, n)" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": ", e.g., from number to " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "() =>" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " string for the member to string of number type. Finally, we connect each function type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "(\\overline{p}) => R" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " and with its return type " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": ". For instance, " + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "inline_equation", + "content": "() =>" + }, + { + "bbox": [ + 42, + 515, + 442, + 660 + ], + "type": "text", + "content": " string is connected with string. Examples of type graphs can be found in §2.2 and Figure 3. 
Note that these extension steps are abstract, in the sense that they focus on the type of the expression being extended and the resulting type after extension, not considering textual representation and parameters." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 212, + 307, + 440, + 408 + ], + "blocks": [ + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "lines": [ + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "spans": [ + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": "Table 1. Definition of DERIVABLE(x) for partial expressions introduced in Figure 4. " + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "inline_equation", + "content": "s \\leq s'" + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": " expresses that " + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": " is a prefix of " + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": ". pmatch(s, T) determines whether a prefix " + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": " partially matches the regular expression of literals of type " + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 212, + 261, + 442, + 306 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 212, + 307, + 440, + 408 + ], + "lines": [ + { + "bbox": [ + 212, + 307, + 440, + 408 + ], + "spans": [ + { + "bbox": [ + 212, + 307, + 440, + 408 + ], + "type": "table", + "html": "
sDERIVABLE(qs)
l{T | pmatch(l,T),T ∈ {number, string, boolean}}
x{T | x ≤ n, (n : T) ∈ Γ}
(¯p) => e{ (¯p) => T | REACHABLE(DERIVABLE(qe),T)}
(e{T | REACHABLE(DERIVABLE(qe),T)}
e ⊙{T | ∃S': Γ ↔ e : S ∧ S ⊕ S': T}
e({R | Γ ↔ e: (¯p) => R}
e.a{S | a ≤ n, Γ ↔ e : T, LOOKUP(T,n) = S}
", + "image_path": "72e91109dcf5c43d35127ada7978584ddc3a01d52f248a6f41efc03e3062ce21.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 43, + 83, + 442, + 213 + ], + "blocks": [ + { + "bbox": [ + 43, + 83, + 442, + 213 + ], + "lines": [ + { + "bbox": [ + 43, + 83, + 442, + 213 + ], + "spans": [ + { + "bbox": [ + 43, + 83, + 442, + 213 + ], + "type": "table", + "html": "
Algorithm 2 Our type reachability search algorithm
Input: Current type T of some expression e, goal type G
Output: Whether G can be reached by extending e
1: function REACHABLE(T, G)
2: if T = G then return true▷ The goal type is successfully found
3: if T is marked then return false else mark T▷ Type T is marked to avoid cycles
4: for each valid extension step ⌿ from T do
5: S := the resulting type of applying ⌿ on T
6: if PRUNESEARCH(T, G, S) continue▷ Prune the search to ensure termination
7: if REACHABLE(S, G) return true▷ Recurse to the next round of extension
8: return false▷ No suitable extension is found
", + "image_path": "a30990f5fbd78a3d5bd03abb5291ed565e72139dbd4b176396fd845291662c23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "spans": [ + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": "The type reachability algorithm, Algorithm 2, implements a depth-first search over this type graph, starting from the current type " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": ", succeeding upon finding goal type " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": " (Line 2), marking any visited types to prevent cycles (Line 3). Then, it proceeds to iterate over all valid extension steps from " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": " (Line 4) and computes the resulting type " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": " after the extension step is applied (Line 5). In the conceptualized type graph, as described in the previous paragraph, this is equivalent to exploring all outgoing edges from " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": ". 
At Line 7, we proceed to recursively search if " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": " can reach " + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 231, + 442, + 315 + ], + "type": "text", + "content": ". If all recursive calls are unsuccessful, the goal type can not be reached (Line 8)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "spans": [ + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": "Some programming languages define self-referential default members, e.g., clone in Java or value0f in TypeScript, which are nullary functions that return a value of the same type as the callee, " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "(\\mathbf{\\beta})\\Rightarrow T" + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": " for type " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": ". When these members are accessed in functions, higher-order functions can be derived indefinitely. 
For instance, for a function " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": " with type " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "(\\mathbf{\\beta})\\Rightarrow S" + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "f." + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": " value0f has the type " + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "inline_equation", + "content": "(\\mathbf{\\beta})\\Rightarrow (\\mathbf{\\beta})\\Rightarrow S" + }, + { + "bbox": [ + 42, + 316, + 442, + 470 + ], + "type": "text", + "content": ". We therefore need to restrict the type search to a finite set of types to ensure termination. At Line 6 of Algorithm 2, we add a heuristic PRUNESEARCH into the search, which decides where to prune the search process. We develop a simple heuristic based on the results from Gvero et al. [30]. This heuristic prunes exploration of types with higher complexity than goal or source type if they do not contain yet unexplored primitive types, thus preventing exploration of arbitrarily complex types. The details of this heuristic are presented in Appendix A.3. While ensuring termination, our heuristic leads to incompleteness and the potential rejection of well-typed expressions. However, this effect is less pronounced in practical usage, as only highly complex (thus less realistically used) types are avoided." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 471, + 293, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 471, + 293, + 484 + ], + "spans": [ + { + "bbox": [ + 52, + 471, + 293, + 484 + ], + "type": "text", + "content": "We proceed to prove the soundness of Algorithm 2 below." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "spans": [ + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "content": "LEMMA 3. The type search in Algorithm 2 is sound, i.e., for any expression " + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash e : T" + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "content": ", if REACHABLE(T,G) holds, then there exists a sequence of extensions " + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash e \\circ y : G" + }, + { + "bbox": [ + 43, + 489, + 443, + 514 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "spans": [ + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": "Proof. 
By the design of Algorithm 2, if REACHABLE" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "(T,G)" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " returns true, there is a sequence of " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " recursive calls to REACHABLE" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "(T_i,G)" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "T_0 = T" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " and REACHABLE" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "(T_n,G) = \\text{true}" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": ". 
Each " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "T_i" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "i > 0" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": ") is derived because some extension " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "\\diamond_i" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " is applicable to " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "T_{i-1}" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " based on the typing rules of " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": ". We then convert each " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "\\diamond_i" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " to its concrete, textual version " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "\\spadesuit_i" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": ". This representation includes the required well-typed parameters of " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "\\spadesuit_i" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " (i.e., for binary operators and non-nullary functions), which are constructed using literals. 
Finally, we construct " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "inline_equation", + "content": "\\spadesuit_1 \\circ \\ldots \\circ \\spadesuit_n" + }, + { + "bbox": [ + 42, + 519, + 443, + 593 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "text", + "content": "Note that using any pruning heuristic at Line 6 of Algorithm 2 preserves soundness, which in turn is sufficient to preserve the required prefix property, as defined in Definition 2. We can conclude that the two-tiered search algorithm soundly determines whether the desired target type can be derived from some partial input. Therefore, we conclude that " + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "inline_equation", + "content": "A_{e} \\downarrow T" + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "text", + "content": " are prefix automata that parse a subset of well-typed expressions in " + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 599, + 443, + 660 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 71 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 71 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 71 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "spans": [ + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "content": "COROLLARY 4. If REACHABLE( DERIVABLE(q), G) holds for any " + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "inline_equation", + "content": "q \\in \\gamma(I_e, s)" + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "content": " of a partial expression " + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "content": ", then there exists a suffix " + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "inline_equation", + "content": "s'" + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "inline_equation", + "content": "\\Gamma \\vdash s \\circ s': G" + }, + { + "bbox": [ + 44, + 84, + 441, + 109 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 116, + 311, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 116, + 311, + 128 + ], + "spans": [ + { + "bbox": [ + 54, + 116, + 311, + 128 + ], + "type": "text", + "content": "Proof. This conclusion follows directly from Lemmas 2 and 3." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "spans": [ + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": "LEMMA 5. The language parsed by " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "A_e \\downarrow T" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": " is thus a subset of the expressions of " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": " of type " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "L(A_e \\downarrow T) \\subseteq \\{s \\mid \\Gamma \\vdash s : T\\}" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "A_e" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": " recursively involves " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "A_e \\downarrow T" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": ", the language parsed by " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "A_e" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": " is also a subset of well-typed expressions of " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "inline_equation", + "content": "L(A_e) \\subseteq \\{s \\mid \\exists T : \\Gamma \\vdash s : T\\}" + }, + { + "bbox": [ + 44, + 137, + 441, + 173 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 183, + 211, + 195 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 183, + 211, + 195 + ], + "spans": [ + { + "bbox": [ + 44, + 183, + 211, + 195 + ], + "type": "text", + "content": "3.5 Prefix Automata for Statements" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "spans": [ + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": "We define the remaining automata to capture the complete language " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": ". 
The statement automaton is defined recursively as " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}}" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": ". The declaration automaton " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x: T}" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": "; captures undefined variable names " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": " by accepting all strings, except for existing identifiers. This automaton is a prefix automaton since an accepting state can always be reached by appending characters to the declared identifier. The return statement automaton is " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "A_{\\emptyset}" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": " when outside a function and restricts the parsed expression to the return type of the surrounding function otherwise. The remaining automata are mainly concatenations of previously defined automata and recursive invocations of " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "A_{s}" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": ", with small variations detailed in Appendix A.5." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 300, + 441, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 300, + 441, + 373 + ], + "spans": [ + { + "bbox": [ + 44, + 300, + 441, + 373 + ], + "type": "text", + "content": "Tracking Type Environments. Generally, we follow the typing rules in Figure 6. Identifiers are passed on through all state transitions, matching the rule SEQ, where the type environment of consecutive statements needs to be compatible. However, in the cases of BLOCK, ITE and FUN, we discard the local type environment after parsing, matching the respective typing rules. In FUN additionally, the function signature and parameters are added into the type environment of the function body automaton, and the function signature in the environment of subsequent statements." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "spans": [ + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "text", + "content": "Guaranteeing Return Types. When parsing the body of a function, the transition function of the function automata " + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{FUN}}" + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "text", + "content": " maintains information about the declared return type and the encountered return statements (if any). 
" + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{FUN}}" + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "text", + "content": " only accepts states where all return values match the declared return type and all execution paths inside the function body return, following " + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "inline_equation", + "content": "L_B" + }, + { + "bbox": [ + 44, + 379, + 441, + 463 + ], + "type": "text", + "content": "'s typing rules in Figure 7. If the current generated statements do not return in all execution paths, another statement is forced to be generated. Since we can always express the requested type through literals, a correct return statement can always be generated and the prefix automaton property is not violated." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "spans": [ + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "text", + "content": "The described rules are straightforward to implement without violating the prefix property as all restrictions are derived only from already parsed input, e.g., the already defined identifiers or the previously declared function return type. We can therefore deduce that the statement automaton is a prefix automaton. Moreover, the automaton accepts all valid statements of " + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "text", + "content": ", with the exception of well-typed expressions rejected by " + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "inline_equation", + "content": "A_{e}" + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "text", + "content": ". 
Therefore the parsed language is a subset of " + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 44, + 463, + 441, + 524 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "spans": [ + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "text", + "content": "LEMMA 6. With " + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "inline_equation", + "content": "A_M \\coloneqq A_{\\overline{s}}" + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "text", + "content": " it holds that " + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "inline_equation", + "content": "A_M" + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "text", + "content": " is a prefix automaton and " + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "inline_equation", + "content": "L(A_M) \\subseteq L_B" + }, + { + "bbox": [ + 54, + 530, + 381, + 542 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 552, + 168, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 552, + 168, + 565 + ], + "spans": [ + { + "bbox": [ + 44, + 552, + 168, + 565 + ], + "type": "text", + "content": "4 Extension to TypeScript" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 44, + 567, + 441, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 567, + 441, + 604 + ], + "spans": [ + { + "bbox": [ + 44, + 567, + 441, + 604 + ], + "type": "text", + "content": "We extend our completion engine described in §3 to handle a core subset of modern TypeScript. In this section, we selectively discuss the implementation of several interesting TypeScript features. 
We provide a comprehensive list of supported and unsupported TypeScript features in Appendix B." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 610, + 441, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 610, + 441, + 658 + ], + "spans": [ + { + "bbox": [ + 44, + 610, + 441, + 658 + ], + "type": "text", + "content": "Constant Variable Declarations. In addition to variable declaration using let, TypeScript supports constant declarations using const. This defines immutable identifiers. We thus additionally track mutability of each identifier in the type environment and disallow applying the assignment operator to immutable identifiers." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 440, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 440, + 133 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 440, + 133 + ], + "type": "text", + "content": "Arrays. We add support for array type annotation, parsing array expressions, and reading from and assigning to array fields. In array expressions, we enforce that all array elements have the same type. Moreover, array types introduce another dimension of type nesting. 
Therefore we adapt the type reachability pruning heuristic to handle this additional dimension to ensure termination." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "spans": [ + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "text", + "content": "**Loops.** TypeScript supports various loop constructs, including for, while, do-while, and for...of loops. These are implemented mostly as variations of the statement block parser. The for...of loop uniquely constrains the right-hand side of the ..of operator to an array of any type. To adapt the type search, we introduce a generic array type " + }, + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "inline_equation", + "content": "\\bullet[\\]" + }, + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "text", + "content": ", which matches any array type. For example, both types number[] and string[] match " + }, + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "inline_equation", + "content": "\\bullet[\\]" + }, + { + "bbox": [ + 42, + 138, + 442, + 199 + ], + "type": "text", + "content": " in Line 2 of Algorithm 2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "spans": [ + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "content": "Additional Operators and Types. 
We add several arithmetic and logic operators, such as modulo " + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "content": " , exact equality " + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "inline_equation", + "content": "= = =" + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "content": " , logical or " + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "inline_equation", + "content": "||" + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "content": " , and the ternary operator " + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "inline_equation", + "content": "\\text{?}" + }, + { + "bbox": [ + 42, + 205, + 442, + 253 + ], + "type": "text", + "content": " :. To handle these operators, we add additional edges to the type search graph. Moreover, we add support for post- and prefix operators such as -- and ++, which are only valid extensions to mutable expressions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "spans": [ + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": "Operator Precedence. TypeScript defines an operator precedence, which determines the implicit grouping of expressions. For example " + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "inline_equation", + "content": "1 + 2" + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": ". toString() is parsed as " + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "inline_equation", + "content": "1 + (2" + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": ". toString()) . We adapt our expression parsing algorithm in two places to handle operator precedences. 
First, in the expression automaton, we leverage the knowledge about previously parsed extensions to determine the implicit grouping and thus where the next operator is applied. For example, for state " + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "inline_equation", + "content": "q^{1} + 2" + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": ", the member access extension " + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": " is applied to 2, as opposed to " + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "inline_equation", + "content": "1 + 2" + }, + { + "bbox": [ + 42, + 258, + 442, + 378 + ], + "type": "text", + "content": ". Second, we adapt the type search in Algorithm 2. Concretely, we ensure that only extensions that can be validly applied based on operator precedence are iterated over. For this, we track the operator precedence of previously parsed extensions and extensions considered during the traversal of the type graph and omit operators in Line 5 that violate operator precedence." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 384, + 442, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 384, + 442, + 432 + ], + "spans": [ + { + "bbox": [ + 42, + 384, + 442, + 432 + ], + "type": "text", + "content": "Global Identifiers and Imports. In TypeScript, many identifiers are defined globally and available in any execution. These global identifiers are incorporated by initializing the type environment of the program automaton accordingly. Identifiers such as Math introduce additional types, which we additionally implement. We also model the import of the crypto library using require." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "spans": [ + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": "Polymorphic Built-In Members. The TypeScript LOOKUP implementation defines a few polymorphic members for built-in types. For example, for array " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": " of type " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "T[]" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": ". map(f) takes a callback function " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": " and returns a new array " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "[f(x[0]), f(x[1]), \\ldots]" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": " has type " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "(T) => P" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": ", the returned array has type " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "P[]" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 42, + 438, + 442, + 498 + ], + "type": "text", + "content": " is a type parameter, which is instantiated by matching the type of the passed function to the type pattern." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 498, + 442, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 498, + 442, + 570 + ], + "spans": [ + { + "bbox": [ + 42, + 498, + 442, + 570 + ], + "type": "text", + "content": "We support such polymorphisms by adapting the type search. We track type patterns and enforce that type parameters are instantiated before the goal type is reached. We then continue the search from the instantiated version. In the map example, when searching completions of x.map, we first search for functions that instantiate the type parameter, and then continue the search from the instantiated type. When anonymous functions are generated as call parameters, we enforce that the function matches the searched type pattern." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 576, + 442, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 576, + 442, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 576, + 442, + 660 + ], + "type": "text", + "content": "Type Annotations. 
TypeScript is designed to be flexible, allowing many type annotations to be omitted when they can be automatically inferred. We generally support this, such as inferring types from initial values. However, it can lead to unexpected types when annotations are omitted, often confusing even experienced developers [47, 48]. Moreover, in the context of LLM-based code generation, having more type annotations can provide valuable information for both the model and our type-constraining algorithms. We have identified three situations where generated code often fails to compile without type annotations, prompting us to enforce them. First, we require" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 59, + 241, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 84, + 440, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 84, + 440, + 133 + ], + "spans": [ + { + "bbox": [ + 44, + 84, + 440, + 133 + ], + "type": "text", + "content": "annotations for all function parameters and return types. Second, all variable declarations must either have a type annotation or be initialized with a value. Third, we enforce type annotations for the first parameter of anonymous functions used as callbacks in the polymorphic built-in member reduce. 
These constraints trade off practical correctness with theoretical language completeness." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 143, + 173, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 143, + 173, + 154 + ], + "spans": [ + { + "bbox": [ + 44, + 143, + 173, + 154 + ], + "type": "text", + "content": "5 Experimental Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 158, + 440, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 158, + 440, + 193 + ], + "spans": [ + { + "bbox": [ + 44, + 158, + 440, + 193 + ], + "type": "text", + "content": "We present an extensive evaluation of our type constraining approach on a variety of tasks and models. We outline our experimental setup (§5.1), evaluate the impact on compilation errors and functional correctness (§5.2), perform runtime analysis (§5.3), and present case studies (§5.4)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 204, + 157, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 204, + 157, + 216 + ], + "spans": [ + { + "bbox": [ + 44, + 204, + 157, + 216 + ], + "type": "text", + "content": "5.1 Experimental Setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 218, + 440, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 218, + 440, + 254 + ], + "spans": [ + { + "bbox": [ + 44, + 218, + 440, + 254 + ], + "type": "text", + "content": "We now outline our main evaluation setup, covering implementation, evaluated tasks, considered models, compared methods, and metrics. We provide further setup details and hyperparameter choices in Appendix B." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 261, + 440, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 261, + 440, + 296 + ], + "spans": [ + { + "bbox": [ + 44, + 261, + 440, + 296 + ], + "type": "text", + "content": "Implementation. 
Our implementation is written in Python and contains 11249 lines of code. To ensure robust implementation, we built a large set of around four hundred unit tests and frequently compared the behaviors of our implementation with the official TypeScript compiler [42]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 304, + 372, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 304, + 372, + 315 + ], + "spans": [ + { + "bbox": [ + 44, + 304, + 372, + 315 + ], + "type": "text", + "content": "Tasks and Benchmarks. We evaluate three relevant tasks of code generation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 319, + 440, + 402 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 53, + 319, + 440, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 319, + 440, + 342 + ], + "spans": [ + { + "bbox": [ + 53, + 319, + 440, + 342 + ], + "type": "text", + "content": "- Synthesis: Given a natural language task description and a function header, the task is to generate a solution from scratch." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 343, + 440, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 343, + 440, + 366 + ], + "spans": [ + { + "bbox": [ + 53, + 343, + 440, + 366 + ], + "type": "text", + "content": "- Translation: Given a function written in Python and the header of an equivalent TypeScript function, the task is to generate the body of the equivalent function in TypeScript." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 367, + 440, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 367, + 440, + 402 + ], + "spans": [ + { + "bbox": [ + 53, + 367, + 440, + 402 + ], + "type": "text", + "content": "- Repair: Given a natural language task description, a non-compilable solution, the corresponding compiler error, and the function header, the task is to restore functionality of the flawed solution by resolving the compilation error." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 406, + 440, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 406, + 440, + 489 + ], + "spans": [ + { + "bbox": [ + 44, + 406, + 440, + 489 + ], + "type": "text", + "content": "The benchmarks for these tasks are based on TypeScript-translated tasks from HumanEval [12] and MBPP [5], contained in the MultiPL-E dataset [13], with 159 and 384 instances each. We observe that success in generating valid code for the same sample can vary depending on the random seed used. To obtain more comprehensive results on the small HumanEval dataset, we generate each sample 4 times with different seeds and aggregate the outcomes. In MBPP, we generate each sample once. For Repair, we collect all non-compiling programs from the unconstrained synthesis task for all models, resulting in 292 and 248 instances for HumanEval and MBPP each." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 44, + 496, + 440, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 496, + 440, + 567 + ], + "spans": [ + { + "bbox": [ + 44, + 496, + 440, + 567 + ], + "type": "text", + "content": "Models. 
We use 6 different open-weight LLMs, covering 3 LLMs of varying parameter sizes from the same model family and 4 models of a similar size from different model families: the Gemma 2 model family with 2B/9B/27B parameters [64], DeepSeekCoder 33B (abbreviated as DSCoder 33B) [28], CodeLlama 34B [59], and Qwen2.5 32B [73]. For all evaluated LLMs we choose the instruction-tuned variants, which are fine-tuned to follow instructions in a chat-style interaction, such that they adequately attempt to resolve the presented tasks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 44, + 575, + 440, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 575, + 440, + 658 + ], + "spans": [ + { + "bbox": [ + 44, + 575, + 440, + 658 + ], + "type": "text", + "content": "Compared Methods. We run unconstrained LLM sampling, reported as Vanilla. We measure the upper bound improvement of prior syntactic constraining methods [8, 57, 66] by assuming that all syntactically incorrect instances generated by Vanilla could be compiled under syntactic constraining. We refer to this improvement as idealized Syntax. We separately sample using type-constrained decoding based on our completion engine introduced in §3 and §4, and report it as Types. Due to the size and complexity of the full TypeScript compiler, featuring over 427,105 lines of code in 698 files [42], our extension does not cover all features of TypeScript. 
We therefore" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 69 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 69 + ], + "type": "text", + "content": "171:16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 44, + 131, + 439, + 310 + ], + "blocks": [ + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "lines": [ + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "text", + "content": "Table 2. Number of instances with compiler errors in unconstrained generation (Vanilla), idealized syntax-only constraining (Syntax), and our proposed type constraining (Types). 
Type constraining reduces compiler errors by " + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "inline_equation", + "content": "74.8\\%" + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "inline_equation", + "content": "56.0\\%" + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "text", + "content": " in the synthesis of HumanEval and MBPP problems respectively, compared to only " + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "inline_equation", + "content": "9.0\\%" + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "inline_equation", + "content": "4.8\\%" + }, + { + "bbox": [ + 42, + 84, + 440, + 128 + ], + "type": "text", + "content": " ideal improvement on the two datasets respectively through syntax-only constraining." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 44, + 131, + 439, + 310 + ], + "lines": [ + { + "bbox": [ + 44, + 131, + 439, + 310 + ], + "spans": [ + { + "bbox": [ + 44, + 131, + 439, + 310 + ], + "type": "table", + "html": "
ModelSynthesisTranslationRepair
VanillaSyntaxTypesVanillaSyntaxTypesVanillaSyntaxTypes
HumanEvalGemma 2 2B10392↓10.7%44↓57.3%177149↓15.8%80↓54.8%194181↓6.7%103↓46.9%
Gemma 2 9B4541↓8.9%13↓71.1%7563↓16.0%16↓78.7%113108↓4.4%52↓54.0%
Gemma 2 27B1513↓13.3%2↓86.7%2020↓0.0%3↓85.0%4540↓11.1%22↓51.1%
DS Coder 33B2625↓3.8%5↓80.8%1817↓5.6%7↓61.1%3636↓0.0%15↓58.3%
CodeLlama 34B8671↓17.4%28↓67.4%158124↓21.5%59↓62.7%153142↓7.2%48↓68.6%
Qwen2.5 32B1717↓0.0%2↓88.2%2421↓12.5%5↓79.2%3634↓5.6%13↓63.9%
MBPPGemma 2 2B6764↓4.5%27↓59.7%126111↓11.9%79↓37.3%194184↓5.2%108↓44.3%
Gemma 2 9B3029↓3.3%10↓66.7%6761↓9.0%33↓50.7%129124↓3.9%63↓51.2%
Gemma 2 27B2019↓5.0%7↓65.0%3736↓2.7%22↓40.5%7169↓2.8%32↓54.9%
DS Coder 33B3232↓0.0%19↓40.6%2927↓6.9%13↓55.2%9090↓0.0%43↓52.2%
CodeLlama 34B8071↓11.2%41↓48.8%126114↓9.5%54↓57.1%157148↓5.7%76↓51.6%
Qwen2.5 32B1918↓5.3%13↓31.6%2222↓0.0%16↓27.3%5552↓5.5%29↓47.3%
", + "image_path": "9fdda853962702543e4b74b4a4950ce8a8bb257496566563d2784d4fed07cc6e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 331, + 441, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 331, + 441, + 380 + ], + "spans": [ + { + "bbox": [ + 42, + 331, + 441, + 380 + ], + "type": "text", + "content": "emulate a type constraining that supports the entire TypeScript feature set. Concretely, if a sample compiles correctly without any constraining, we report it as-is. Otherwise, we report the result of a constrained resample. For all methods, if generation takes more than 300 seconds, we report the partial program generated until the timeout." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 386, + 442, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 386, + 442, + 447 + ], + "spans": [ + { + "bbox": [ + 42, + 386, + 442, + 447 + ], + "type": "text", + "content": "Metrics. We compute two main metrics to assess the effectiveness of the compared methods. First, we determine the number of compiler errors in model-generated outputs. We count as a compiler error any case in which the TypeScript compiler [42] reports an issue during compilation. To measure functional correctness, we leverage the pass@1 metric [14], which measures the percentage of code generations that pass the provided unit tests given only one trial." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 457, + 303, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 457, + 303, + 468 + ], + "spans": [ + { + "bbox": [ + 43, + 457, + 303, + 468 + ], + "type": "text", + "content": "5.2 Results on Compilation and Functional Correctness" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 472, + 442, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 472, + 442, + 520 + ], + "spans": [ + { + "bbox": [ + 42, + 472, + 442, + 520 + ], + "type": "text", + "content": "In this section, we present our experimental results, showing that on all three code-generation-related tasks, our type constraining approach significantly improves the considered LLMs in generating both compileable and functionally correct code. It also substantially outperforms syntax-only constraining." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "spans": [ + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": "Reduction of Compilation Errors. In Table 2, we present the number of compilation errors produced by each compared method. For synthesis and translation, in the unconstrained setting (Vanilla), on average only " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "9.0\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "4.9\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " of the non-compiling instances in HumanEval and MBPP respectively are due to syntactic errors (Syntax), with Qwen2.5 32B even making no syntax errors at all for HumanEval synthesis and MBPP translation. 
In contrast, type constraining reduces compilation errors by more than half, i.e., by " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "75.3\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "52.1\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " on HumanEval and MBPP respectively. We observe that models across all sizes and families benefit similarly from our constraining, with a minimum error reduction of " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "54.8\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "inline_equation", + "content": "27.3\\%" + }, + { + "bbox": [ + 42, + 527, + 441, + 634 + ], + "type": "text", + "content": " on HumanEval and MBPP respectively, highlighting the general effectiveness of our approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 635, + 441, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 635, + 441, + 658 + ], + "spans": [ + { + "bbox": [ + 42, + 635, + 441, + 658 + ], + "type": "text", + "content": "A straightforward way to improve successful compilation of LLM-generated code is to feed the erroneous code and the error message back to an LLM for correction – our repair task. 
Thanks" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "text", + "content": "171:17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 108, + 434, + 286 + ], + "blocks": [ + { + "bbox": [ + 44, + 84, + 440, + 105 + ], + "lines": [ + { + "bbox": [ + 44, + 84, + 440, + 105 + ], + "spans": [ + { + "bbox": [ + 44, + 84, + 440, + 105 + ], + "type": "text", + "content": "Table 3. pass@1 of unconstrained generation (Vanilla) and type constraining (Types). The benefit of our type-constraining approach transfers from reduced compilation errors to improved functional correctness." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 108, + 434, + 286 + ], + "lines": [ + { + "bbox": [ + 50, + 108, + 434, + 286 + ], + "spans": [ + { + "bbox": [ + 50, + 108, + 434, + 286 + ], + "type": "table", + "html": "
ModelSynthesisTranslationRepair
VanillaTypesVanillaTypesVanillaTypes
HumanEvalGemma 2 2B29.130.2↑3.8%50.253.9↑7.5%11.620.9↑79.4%
Gemma 2 9B56.658.3↑3.1%73.778.3↑6.2%24.034.9↑45.7%
Gemma 2 27B69.571.2↑2.5%86.687.7↑1.3%38.441.1↑7.1%
DS Coder 33B68.971.1↑3.2%88.790.1↑1.6%47.650.7↑6.5%
CodeLlama 34B41.043.4↑5.7%58.663.5↑8.3%17.527.4↑56.9%
Qwen2.5 32B79.681.8↑2.8%92.193.9↑1.9%65.471.2↑8.9%
MBPPGemma 2 2B40.442.4↑5.2%52.356.0↑7.0%12.122.6↑86.7%
Gemma 2 9B65.467.4↑3.2%71.475.8↑6.2%24.231.9↑31.7%
Gemma 2 27B70.672.1↑2.2%83.184.4↑1.6%39.145.2↑15.5%
DS Coder 33B65.467.2↑2.8%85.989.1↑3.6%35.143.1↑23.0%
CodeLlama 34B42.245.6↑8.0%55.763.3↑13.6%15.726.6↑69.2%
Qwen2.5 32B76.376.6↑0.3%89.690.4↑0.9%48.054.0↑12.6%
", + "image_path": "946af2491169f3cdaa486ef0392824c733f75141a4c823922253fc196de5d150.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "spans": [ + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": "to its general applicability, our type constraining approach can also enhance this process. Our experimental results in the setting of code repair are also depicted in Table 2. We find that, in the vanilla setting, many models struggle to correctly localize and resolve compilation errors, with Gemma 2 2B for example repairing only " + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "inline_equation", + "content": "33.5\\%" + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "inline_equation", + "content": "25.8\\%" + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": " of the non-compiling HumanEval and MBPP instances, respectively. This is substantially increased to " + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "inline_equation", + "content": "56.4\\%" + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "inline_equation", + "content": "58.4\\%" + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": " through type constraining. On average, using type-constrained sampling, " + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "inline_equation", + "content": "53.7\\%" + }, + { + "bbox": [ + 44, + 305, + 440, + 388 + ], + "type": "text", + "content": " more compilation errors are resolved than using vanilla LLM decoding." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "spans": [ + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "text", + "content": "Improving Functional Correctness. Programs that do not compile are always functionally incorrect. With our type constraining method, non-compilable generations can be turned into well-formed ones, offering the possibility of achieving functional correctness. In Table 3, we experimentally show that type constraining universally improves the functional correctness of LLM-generated code. On the three tasks considered, employing type constraining improves LLMs' pass@1 rate, achieving an average increase by " + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "inline_equation", + "content": "3.5\\%" + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "text", + "content": " in synthesis, " + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "inline_equation", + "content": "5.0\\%" + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "text", + "content": " in translation, and " + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "inline_equation", + "content": "37.0\\%" + }, + { + "bbox": [ + 44, + 394, + 441, + 491 + ], + "type": "text", + "content": " in repair tasks. The larger improvement in the latter is due to vanilla LLMs generally struggling to generate functionally correct code. One interesting phenomenon is that, for stronger models," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 491, + 273, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 491, + 273, + 561 + ], + "spans": [ + { + "bbox": [ + 44, + 491, + 273, + 561 + ], + "type": "text", + "content": "constraints more likely lead to recovering functionally correct code. 
For example on the synthesis task, for Gemma 2 27B, out of the 26 instances that required resampling to compile successfully, 17 are also functionally correct. For Qwen2.5 32B, 15 out of 21 such instances were correct." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 572, + 148, + 584 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 572, + 148, + 584 + ], + "spans": [ + { + "bbox": [ + 44, + 572, + 148, + 584 + ], + "type": "text", + "content": "5.3 Runtime Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 587, + 273, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 587, + 273, + 634 + ], + "spans": [ + { + "bbox": [ + 44, + 587, + 273, + 634 + ], + "type": "text", + "content": "As discussed in §2, compared with vanilla LLM decoding, our constrained decoding algorithm runs an additional loop (Line 4 of Algorithm 1), where tokens are sampled from an LLM-produced next-token probability distribu" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 635, + 440, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 635, + 440, + 658 + ], + "spans": [ + { + "bbox": [ + 44, + 635, + 440, + 658 + ], + "type": "text", + "content": "tion and checked against the completion engine. In this section, we investigate how this process introduces additional runtime overhead for our type constraining. Note that for each selected token," + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 281, + 540, + 439, + 630 + ], + "blocks": [ + { + "bbox": [ + 280, + 493, + 440, + 537 + ], + "lines": [ + { + "bbox": [ + 280, + 493, + 440, + 537 + ], + "spans": [ + { + "bbox": [ + 280, + 493, + 440, + 537 + ], + "type": "text", + "content": "Table 4. Median time per synthesis instance in seconds spent by our type-constrained decoding and its relative increase compared with unconstrained decoding (Vanilla)." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 281, + 540, + 439, + 630 + ], + "lines": [ + { + "bbox": [ + 281, + 540, + 439, + 630 + ], + "spans": [ + { + "bbox": [ + 281, + 540, + 439, + 630 + ], + "type": "table", + "html": "
ModelHumanEvalMBPP
Gemma 2 2B6.7↑38.3%6.3↑35.4%
Gemma 2 9B8.3↑29.2%9.5↑46.8%
Gemma 2 27B11.7↑19.9%11.7↑32.8%
DS Coder 33B11.5↑36.2%9.4↑59.5%
CodeLlama 34B7.6↑40.8%7.0↑37.6%
Qwen2.5 32B7.3↑39.6%4.9↑54.8%
", + "image_path": "743d1ede4fcce2829efeee991f239fd26c6755ecff1cb142a39893bdf51e8ec9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 441, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 441, + 110 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 441, + 110 + ], + "type": "text", + "content": "vanilla and constrained decoding both run LLM inference only once, meaning that there is no extra overhead from LLM inference in constrained decoding." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "spans": [ + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "text", + "content": "Overhead of Type Constraining. For an application of our method in practice, the effective runtime increase due to constrained decoding is highly relevant. To assess it, we measure the runtime per synthesis instance in HumanEval and MBPP for both unconstrained and type-constrained decoding. We report the median runtime per instance for type constraining and its relative increase to unconstrained decoding in Table 4. 
On average over the evaluated models, we observe a relative increase of " + }, + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "inline_equation", + "content": "39.1\\%" + }, + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "inline_equation", + "content": "52.1\\%" + }, + { + "bbox": [ + 42, + 117, + 442, + 226 + ], + "type": "text", + "content": " in HumanEval and MBPP respectively. We consider this impact to be bearable for the observed significant decrease in compilation errors. Moreover, this is measured on an unoptimized, Python-based implementation and could be significantly improved by a more system-oriented implementation, such as the one proposed by Dong et al. [18]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "spans": [ + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "text", + "content": "Number of Sample-and-Check Loop Iterations. To provide an in-depth analysis of the overhead of our type constraining method, we measure the number of iterations spent by the sample-and-check loop to find an admissible token. The results are provided in Figure 8. We observe that the number of loop iterations follows a long-tail distribution. For " + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "inline_equation", + "content": "99.4\\%" + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "text", + "content": " of cases, only one loop iteration is needed. 
This number is even higher for stronger models, with Gemma 2 9B and 27B requiring one iteration in " + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "inline_equation", + "content": "99.6\\%" + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "inline_equation", + "content": "99.9\\%" + }, + { + "bbox": [ + 42, + 231, + 276, + 398 + ], + "type": "text", + "content": " of cases, respectively. This means that, in most instances, LLMs can generate a valid token on the first attempt, which is then verified by the completion engine. In cases where more than one iteration is needed, the completion engine intervenes to guide the selection" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 284, + 236, + 437, + 339 + ], + "blocks": [ + { + "bbox": [ + 284, + 236, + 437, + 339 + ], + "lines": [ + { + "bbox": [ + 284, + 236, + 437, + 339 + ], + "spans": [ + { + "bbox": [ + 284, + 236, + 437, + 339 + ], + "type": "image", + "image_path": "847ef4f35ce5a95c759a2415d323273e8c7cbd2578acc0a62740b9dd06115d1b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 279, + 342, + 443, + 397 + ], + "lines": [ + { + "bbox": [ + 279, + 342, + 443, + 397 + ], + "spans": [ + { + "bbox": [ + 279, + 342, + 443, + 397 + ], + "type": "text", + "content": "Figure 8. Histogram on the number of iterations consumed by the sample-and-check loop at Line 4 of Algorithm 1 to find a valid token, measured with Gemma 2 2B for HumanEval synthesis." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 399, + 442, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 399, + 442, + 422 + ], + "spans": [ + { + "bbox": [ + 42, + 399, + 442, + 422 + ], + "type": "text", + "content": "of valid tokens. 
These interventions help resolve errors in many instances in our benchmarks, providing significant benefit, as discussed in §5.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 423, + 443, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 423, + 443, + 531 + ], + "spans": [ + { + "bbox": [ + 42, + 423, + 443, + 531 + ], + "type": "text", + "content": "Prior work [8, 57, 66] implemented constrained decoding differently than Algorithm 1. Instead of running the sample-and-check loop, they execute the completion engine for all tokens in the LLM's vocabulary, mask out all invalid tokens, and sample once from the remaining valid tokens based on their normalized likelihoods. This implementation is less efficient than ours, especially when calling the completion engine is costly. Based on the long-tail distribution depicted in Figure 8, our implementation requires only a single invocation to the completion engine for most cases, whereas their implementation calls it for every token in the LLM's vocabulary for all cases. In our experiments, their implementation results in timeouts for all benchmark instances, with a timeout of 5 minutes per instance. This justifies our choice of using the sample-and-check loop." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 43, + 541, + 120, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 541, + 120, + 552 + ], + "spans": [ + { + "bbox": [ + 43, + 541, + 120, + 552 + ], + "type": "text", + "content": "5.4 Case Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 555, + 442, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 555, + 442, + 593 + ], + "spans": [ + { + "bbox": [ + 42, + 555, + 442, + 593 + ], + "type": "text", + "content": "For a qualitative evaluation, we manually inspect instances where unconstrained decoding fails and our type constraining approach successfully corrects errors. 
We find that our technique effectively amends various types of compilation errors. We showcase three such examples in Figure 9." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 599, + 442, + 659 + ], + "type": "text", + "content": "Wrong Parameter Count. In Figure 9a, the task is to find all words in a string that contain a number of consonants. When Gemma 2 2B attempts to solve this problem, translating from the Python version to TypeScript, it calls split without arguments. This is allowed in Python, but not in TypeScript, where a single string argument is required. Type constraining correctly detects this and guides the generation to sample a space character as parameter." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 44, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 83, + 439, + 244 + ], + "blocks": [ + { + "bbox": [ + 48, + 83, + 439, + 244 + ], + "lines": [ + { + "bbox": [ + 48, + 83, + 439, + 244 + ], + "spans": [ + { + "bbox": [ + 48, + 83, + 439, + 244 + ], + "type": "table", + "html": "
VanillaTypes
(a)// find all words with consonants function select_words(s: string): string[] { for (const word of s. split()// find all words with consonants function select_words(s: string): string[] { for (const word of s. split(' ')
(b)function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } // No return }function largest_divisor(n: number): number { for(let i = n - 1; i > 0; i-- ) { if(n % i == 0) return i; } return 1; }
(c)// insert a delimiter between every element return nums.reduce( (acc, curr, index) => { acc.pushurr); return acc; }, [])// insert a delimiter between every element return nums.reduce( (acc: number[] , num, index) => { acc.pushurr); return acc; }, [])
", + "image_path": "bf0afb47f1cfa0e9f681afec1348c6553d11edd6b8f05b746402a2fb18e6f6cd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 249, + 440, + 328 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 249, + 440, + 328 + ], + "spans": [ + { + "bbox": [ + 42, + 249, + 440, + 328 + ], + "type": "text", + "content": "Figure 9. Three examples illustrating the effect of type-constrained sampling. Left are unconstrained generations with problematic tokens highlighted in red, and right are type-constrained results with corrected tokens highlighted in green, adapted for clarity. In (a), Gemma 2 2B attempts to call split, missing required arguments. In (b), DeepSeekCoder 33B attempts to complete a function without a guaranteed return. The issue is resolved by forcing generation of another statement after the main loop. In (c), Gemma 2 9B calls reduce with an anonymous function without type annotation. This leads to an incorrect type inference for the first parameter. The issue is solved by guiding the model to add type annotation." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 42, + 353, + 440, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 353, + 440, + 424 + ], + "spans": [ + { + "bbox": [ + 42, + 353, + 440, + 424 + ], + "type": "text", + "content": "Missing Return Statement. In Figure 9b, to complete function largest_divisor, the model must compute a straightforward divisor loop. DeepSeekCoder 33B Instruct [28] implements a correct loop, but does not guarantee returning a value in every execution path. When the return statement in the loop is never executed, e.g., for negative inputs, the function thus returns undefined, violating the type rules. Our method detects this issue and forces the generation of another statement in the function body, resulting in a correct fallback return statement." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 433, + 440, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 433, + 440, + 540 + ], + "spans": [ + { + "bbox": [ + 42, + 433, + 440, + 540 + ], + "type": "text", + "content": "Incorrect Type Inference. In Figure 9c, the task is to insert a delimiter between every element in an array. Gemma 2 9B solves this with the reduce function. This generic function accepts two arguments; first, a callback function that is called consecutively for every element in the array and accumulates a result, second, an initial value for the callback function. The type of the accumulator of the callback is derived implicitly from the second argument, which is an empty array in the given example. TypeScript infers special type never[] for the empty array, disallowing inserting curr of type number through push. Therefore, the program fails to compile. This issue is a well-known limitation of the TypeScript compiler, often confusing even expert developers [47, 48]. Our method resolves it by enforcing adequate type annotation on the first argument of the callback function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 552, + 109, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 552, + 109, + 562 + ], + "spans": [ + { + "bbox": [ + 43, + 552, + 109, + 562 + ], + "type": "text", + "content": "6 Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 566, + 441, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 566, + 441, + 589 + ], + "spans": [ + { + "bbox": [ + 42, + 566, + 441, + 589 + ], + "type": "text", + "content": "Our general type constraining approach, backed by strong experimental results, opens exciting avenues for future research, which we discuss below." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 599, + 442, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 599, + 442, + 658 + ], + "spans": [ + { + "bbox": [ + 42, + 599, + 442, + 658 + ], + "type": "text", + "content": "Implementation Effort. Developing a completion engine for a target programming language currently requires manual efforts. However, we expect that the involved effort to adopt our method to other languages will be reduced significantly, as many features transfer from our implementation for " + }, + { + "bbox": [ + 42, + 599, + 442, + 658 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 599, + 442, + 658 + ], + "type": "text", + "content": " and TypeScript. Moreover, we believe that, due to the huge impact on LLM's code generation, the effort will pay off. Future programming language developers may consider generally writing" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 65, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 65, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 65, + 68 + ], + "type": "text", + "content": "171:20" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 46, + 87, + 437, + 133 + ], + "blocks": [ + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "lines": [ + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "spans": [ + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "text", + "content": "function 
sort_threel(number[],r:number[]):number[]{ \nfor(let " + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "inline_equation", + "content": "\\mathrm{i} = 0" + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "text", + "content": " ;i<1.length; " + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "inline_equation", + "content": "\\mathrm{i + + }" + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "text", + "content": " ){ \nr.push(l[i].toString().slice(0,3).concat(l[i].ToString().split())'.split').reverse() .join(')).split''.reverse().join('').ToString() " + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 46, + 87, + 437, + 133 + ], + "type": "text", + "content": " l[i].ToString().slice(3).split')…" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 42, + 137, + 442, + 172 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 137, + 442, + 172 + ], + "spans": [ + { + "bbox": [ + 42, + 137, + 442, + 172 + ], + "type": "text", + "content": "Figure 10. Complications errors remain when the model does not terminate after a corrected token. In this example for synthesis on the HumanEval task #33, CodeLlama 34B is steered away from accessing non-existing member .sort and instead accesses .string." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 42, + 197, + 441, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 197, + 441, + 222 + ], + "spans": [ + { + "bbox": [ + 42, + 197, + 441, + 222 + ], + "type": "text", + "content": "their compilers as an incremental completion engine, which additionally enables automatic adoption for constrained code generation, besides conventional grammar parsing and type checking." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "spans": [ + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": "Broader Application to More Complex Tasks and Stronger LLMs. Stronger LLMs, such as the latest OpenAI models [33], may make fewer typing errors on the HumanEval and MBPP datasets. Our evaluation results in Table 2 also demonstrate that compilation errors decrease with increasing model size for the Gemma family. However, recent findings showed that currently, even the strongest LLMs struggle with generating compilable code for more complex coding tasks, stricter typing rules, and low-resource languages (e.g., new DSLs). Gusanidas [29] evaluated various state-of-the-art LLMs on difficult code synthesis tasks in Rust, reporting compilation error rates of " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " for OpenAI o1-mini [33], " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "39\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " for DeepSeek R1 [15] and " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " for Anthropic's Claude 3.5 Sonnet [2]. For OCaml and Haskell, which are sparsely represented in LLMs' training data, the error rate is even higher at " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "40\\% - 60\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " for all models, matching a trend of worse performance on low-resource languages [24, 36]. Pan et al. 
[54] compiled a large dataset of code translation and found " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "44.3\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " of GPT-4-generated code to contain compilation errors. Similarly, Shetty et al. [61] report around " + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 44, + 228, + 442, + 408 + ], + "type": "text", + "content": " compilation errors for C-to-Rust translation using OpenAI o1 models. Our type constraining approach is broadly applicable to all these scenarios and our work presents a promising proof of concept. Future work can consider building upon our approach to address these challenges." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 408, + 442, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 408, + 442, + 468 + ], + "spans": [ + { + "bbox": [ + 42, + 408, + 442, + 468 + ], + "type": "text", + "content": "Constrained decoding in general requires access to the next-token probability distributions produced by LLMs. Currently, commercially available black-box LLM APIs only return sampled tokens and do not offer complete next-token distributions. A possible solution is to integrate our method into the backend of model providers, as was recently implemented for guaranteeing adherence to JSON Schemas [3, 50]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 476, + 442, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 476, + 442, + 561 + ], + "spans": [ + { + "bbox": [ + 42, + 476, + 442, + 561 + ], + "type": "text", + "content": "Remaining Compiler Errors. We observe that, even though constrained decoding guarantees a valid result upon termination, a considerable amount of compilation errors remain due to non-termination within the token or time limit. 
We find this to be caused by generation loops, entered when generation is amended by constraints and the LLM is unable to recover. An example is depicted in Figure 10, where CodeLlama 34B tries to access the invalid member sort on an expression of type number. Future work may add additional constraints to force stopping such unconstructive loops and steer the model more strictly, e.g., by limiting the complexity of generated expressions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 43, + 571, + 125, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 571, + 125, + 582 + ], + "spans": [ + { + "bbox": [ + 43, + 571, + 125, + 582 + ], + "type": "text", + "content": "7 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 586, + 442, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 586, + 442, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 586, + 442, + 659 + ], + "type": "text", + "content": "Code Language Models. Recently, LLMs have gained traction for diverse coding tasks such as code synthesis, repair, or translation [35]. These models are typically trained on datasets containing billions to trillions of tokens and have billions of parameters, with both factors contributing to improved performance in code-related benchmarks [28, 46, 59, 64]. Meanwhile, LLMs are well known to frequently make mistakes [32, 58], and, as we show in this work, even state-of-the-art open-weight models with over 30 billion parameters frequently make errors in code generation." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "text", + "content": "171:21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 84, + 441, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 84, + 441, + 192 + ], + "spans": [ + { + "bbox": [ + 44, + 84, + 441, + 192 + ], + "type": "text", + "content": "Improving Language Model Accuracy. Apart from constrained decoding, three primary approaches have been proposed to enhance the accuracy of language models on code tasks: fine-tuning, retrieval augmentation (RAG), and compiler or execution feedback. Fine-tuning adapts the model weights based on specifically collected training data. This process is highly resource intensive [65, 70]. RAG provides the model with additional context based on a database or related code snippets [6, 57]. Compiler and execution feedback is only available after completing the model generation and requires resampling [16, 34, 69]. However, constrained decoding is orthogonal to these methods and, as indicated by Poesia et al. [57] and our experimental results, combining constrained decoding with RAG or compiler feedback additionally improves model performance." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "spans": [ + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": "Constrained Decoding. Prior work on constrained decoding failed to achieve strong results due to its limitation to syntactic language features. Constraining to context-free languages has been explored extensively in recent work [7, 8, 57, 71]. Simple context-sensitive syntactic features, such as the space indentation in Python and the scope markers in Go have also been implemented [41, 66]. As demonstrated in §5, however, syntax errors on average account for only " + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 44, + 198, + 441, + 294 + ], + "type": "text", + "content": " of compilation errors in recent code models. The rarity of syntax errors significantly reduces the potential of leveraging them for improvements in code correctness. Meanwhile, our type-constrained decoding more than halved compilation errors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 300, + 441, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 300, + 441, + 419 + ], + "spans": [ + { + "bbox": [ + 44, + 300, + 441, + 419 + ], + "type": "text", + "content": "Type Systems for Code Synthesis. Previous work that leveraged type systems for code synthesis was confined to specialized settings and unable to constrain general, complex program generation. Poesia et al. [57] proposed using known column names to guide SQL query generation. Gvero et al. [30] employed a search on the type graph for function call completion. Agrawal et al. [1] leverage language-server-generated type annotations for object member accesses. Blinn et al. 
[11] use language-server-derived type information to provide additional context to the LLM, but not to enforce hard constraints. Additionally, type constraints have been used to direct code synthesis based on specialized search procedures [22, 56, 69]. However, these methods are not compatible with LLM-based code generation. This limits their ability to exploit the powerful natural language and general-purpose capabilities of LLMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 429, + 111, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 429, + 111, + 440 + ], + "spans": [ + { + "bbox": [ + 44, + 429, + 111, + 440 + ], + "type": "text", + "content": "8 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 444, + 441, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 444, + 441, + 551 + ], + "spans": [ + { + "bbox": [ + 44, + 444, + 441, + 551 + ], + "type": "text", + "content": "In this work, we explored how type systems in programming languages can be used to guide language models during decoding. Concretely, we design and implement prefix automata to perform type constraining for a foundational simply typed language and then extend it to the popular language TypeScript. We extensively evaluate the impact of using such constraints for code synthesis, translation, and repair and observe that we more than halve compilation errors on a diverse set of models and consistently increase functional correctness. We further explore qualitatively how the constraining positively impacts code generation. We conclude that such type constraining should be implemented for more programming languages, and has the potential to generally improve code generation in many domains." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:22" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 85, + 136, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 85, + 136, + 98 + ], + "spans": [ + { + "bbox": [ + 43, + 85, + 136, + 98 + ], + "type": "text", + "content": "Artifact Availability" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 100, + 441, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 100, + 441, + 125 + ], + "spans": [ + { + "bbox": [ + 42, + 100, + 441, + 125 + ], + "type": "text", + "content": "The artifact for this paper, including source code, datasets, and reproductions scripts, is available on GitHub (https://github.com/eth-sri/type-constrained-code-generation) and Zenodo [45]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 43, + 135, + 136, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 135, + 136, + 147 + ], + "spans": [ + { + "bbox": [ + 43, + 135, + 136, + 147 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 149, + 441, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 149, + 441, + 174 + ], + "spans": [ + { + "bbox": [ + 42, + 149, + 441, + 174 + ], + "type": "text", + "content": "We would like to thank the anonymous reviewers for their in-depth and constructive feedback, and the artifact reviewers for their feedback on our artifact accessibility." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 185, + 96, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 185, + 96, + 196 + ], + "spans": [ + { + "bbox": [ + 45, + 185, + 96, + 196 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 199, + 441, + 658 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 48, + 199, + 441, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 199, + 441, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 199, + 441, + 229 + ], + "type": "text", + "content": "[1] Lakshya Agrawal, Aditya Kanade, Navin Goyal, Shuvendu K Lahiri, and Sriram Rajamani. 2023. Monitor-Guided Decoding of Code LMs with Static Analysis of Repository Context. In NeurIPS. https://openreview.net/forum?id=qPUbKxKvXq" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 229, + 441, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 229, + 441, + 249 + ], + "spans": [ + { + "bbox": [ + 48, + 229, + 441, + 249 + ], + "type": "text", + "content": "[2] Anthropic. [n.d.]. Claude 3 Model Card. 
https://assets.anthropic.com/m/61e7d27f8c8f5919/original/Claude-3-ModelCard.pdf Accessed: March 10, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 249, + 441, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 249, + 441, + 268 + ], + "spans": [ + { + "bbox": [ + 48, + 249, + 441, + 268 + ], + "type": "text", + "content": "[3] Anthropic. 2025. JSON Mode. https://docs.anthropic.com/en/docs/build-with-claude/tool-use#json-mode Accessed: March 10, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 268, + 296, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 268, + 296, + 279 + ], + "spans": [ + { + "bbox": [ + 48, + 268, + 296, + 279 + ], + "type": "text", + "content": "[4] Ken Arnold and James Gosling. 1996. The Java Programming Language." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 279, + 441, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 279, + 441, + 309 + ], + "spans": [ + { + "bbox": [ + 48, + 279, + 441, + 309 + ], + "type": "text", + "content": "[5] Jacob Austin, Augustus Odena, Maxwell I. Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie J. Cai, Michael Terry, Quoc V. Le, et al. 2021. Program Synthesis with Large Language Models. arXiv Preprint (2021). https://arxiv.org/abs/2108.07732" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 309, + 441, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 309, + 441, + 329 + ], + "spans": [ + { + "bbox": [ + 48, + 309, + 441, + 329 + ], + "type": "text", + "content": "[6] Nastaran Bassamzadeh and Chhaya Methani. 2024. A Comparative Study of DSL Code Generation: Fine-Tuning vs. Optimized Retrieval Augmentation. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2407.02742" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 329, + 441, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 329, + 441, + 349 + ], + "spans": [ + { + "bbox": [ + 48, + 329, + 441, + 349 + ], + "type": "text", + "content": "[7] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2023. Prompting Is Programming: A Query Language for Large Language Models. PLDI (2023). https://doi.org/10.1145/3591300" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 349, + 441, + 368 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 349, + 441, + 368 + ], + "spans": [ + { + "bbox": [ + 48, + 349, + 441, + 368 + ], + "type": "text", + "content": "[8] Luca Beurer-Kellner, Marc Fischer, and Martin Vechev. 2024. Guiding LLMs The Right Way: Fast, Non-Invasive Constrained Generation. In ICML. https://openreview.net/forum?id=pXaEYzrFae" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 368, + 441, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 368, + 441, + 388 + ], + "spans": [ + { + "bbox": [ + 48, + 368, + 441, + 388 + ], + "type": "text", + "content": "[9] Satwik Bhattachamishra, Kabir Ahuja, and Navin Goyal. 2020. On the Ability and Limitations of Transformers to Recognize Formal Languages. In EMNLP. https://doi.org/10.18653/v1/2020.emnlp-main.576" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 44, + 388, + 389, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 388, + 389, + 398 + ], + "spans": [ + { + "bbox": [ + 44, + 388, + 389, + 398 + ], + "type": "text", + "content": "[10] Gavin M. Bierman, Martin Abadi, and Mads Torgersen. 2014. Understanding TypeScript. In ECOOP." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 44, + 398, + 441, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 398, + 441, + 419 + ], + "spans": [ + { + "bbox": [ + 44, + 398, + 441, + 419 + ], + "type": "text", + "content": "[11] Andrew Blinn, Xiang Li, June Hyung Kim, and Cyrus Omar. 2024. Statically Contextualizing Large Language Models with Typed Holes. OOPSLA (2024). https://doi.org/10.1145/3689728" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 44, + 419, + 441, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 419, + 441, + 448 + ], + "spans": [ + { + "bbox": [ + 44, + 419, + 441, + 448 + ], + "type": "text", + "content": "[12] Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language Models are Few-Shot Learners. In NeurIPS. https://proceedings.neurips.cc/paper/2020/bash/1457c0d6bfcb4967418bf8ac142f64a-Abstract.html" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 44, + 448, + 441, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 448, + 441, + 478 + ], + "spans": [ + { + "bbox": [ + 44, + 448, + 441, + 478 + ], + "type": "text", + "content": "[13] Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q. Feldman, et al. 2023. MultiPL-E: A Scalable and Polyglot Approach to Benchmarking Neural Code Generation. IEEE Trans. Software Eng. (2023)." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 44, + 478, + 441, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 478, + 441, + 509 + ], + "spans": [ + { + "bbox": [ + 44, + 478, + 441, + 509 + ], + "type": "text", + "content": "[14] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Pondé de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating Large Language Models Trained on Code. arXiv Preprint (2021). https://arxiv.org/abs/2107.03374" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 44, + 509, + 441, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 509, + 441, + 538 + ], + "spans": [ + { + "bbox": [ + 44, + 509, + 441, + 538 + ], + "type": "text", + "content": "[15] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, et al. 2025. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.12948" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 44, + 538, + 441, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 538, + 441, + 568 + ], + "spans": [ + { + "bbox": [ + 44, + 538, + 441, + 568 + ], + "type": "text", + "content": "[16] Pantazis Deligiannis, Akash Lal, Nikita Mehrotra, Rishi Poddar, and Aseem Rastogi. 2025. RustAssistant: Using LLMs to Fix Compilation Errors in Rust Code. In ICSE. https://www.microsoft.com/en-us/research/publication/rustassistant-using-llms-to-fix-compiler-errors-in-rust-code/" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 44, + 568, + 441, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 568, + 441, + 588 + ], + "spans": [ + { + "bbox": [ + 44, + 568, + 441, + 588 + ], + "type": "text", + "content": "[17] TypeScript Developers. [n.d.]. 
TypeScript: Documentation – More on Functions. https://www.typescriptlang.org/docs/handbook/2/functions.html#function-type-expressions Accessed: March 10, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 44, + 588, + 441, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 588, + 441, + 617 + ], + "spans": [ + { + "bbox": [ + 44, + 588, + 441, + 617 + ], + "type": "text", + "content": "[18] Yixin Dong, Charlie F. Ruan, Yaxing Cai, Ruihang Lai, Ziyi Xu, Yilong Zhao, and Tianqi Chen. 2024. XGrammar: Flexible and Efficient Structured Generation Engine for Large Language Models. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2411.15100" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 44, + 617, + 328, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 617, + 328, + 628 + ], + "spans": [ + { + "bbox": [ + 44, + 617, + 328, + 628 + ], + "type": "text", + "content": "[19] Alan AA Donovan and Brian W Kernighan. 2015. The Go programming language." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 44, + 628, + 441, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 628, + 441, + 658 + ], + "spans": [ + { + "bbox": [ + 44, + 628, + 441, + 658 + ], + "type": "text", + "content": "[20] Shihan Dou, Haoxiang Jia, Shenxi Wu, Huiyuan Zheng, Weikang Zhou, Muling Wu, Mingxu Chai, Jessica Fan, Caishuang Huang, Yunbo Tao, et al. 2024. What's Wrong with Your Code Generated by Large Language Models? An Extensive Study. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2407.06153" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 86, + 441, + 644 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 44, + 86, + 441, + 107 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 86, + 441, + 107 + ], + "spans": [ + { + "bbox": [ + 44, + 86, + 441, + 107 + ], + "type": "text", + "content": "[21] Javid Ebrahimi, Dhruv Gelda, and Wei Zhang. 2020. How Can Self-Attention Networks Recognize Dyck-n Languages?. In EMNLP. https://aclanthology.org/2020-findings-emnlp.384/" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 107, + 441, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 107, + 441, + 127 + ], + "spans": [ + { + "bbox": [ + 44, + 107, + 441, + 127 + ], + "type": "text", + "content": "[22] Jonás Fiala, Shachar Itzhaky, Peter Müller, Nadia Polikarpova, and Ilya Sergey. 2023. Leveraging Rust Types for Program Synthesis. PLDI (2023). 
https://doi.org/10.1145/3591278" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 127, + 440, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 127, + 440, + 146 + ], + "spans": [ + { + "bbox": [ + 44, + 127, + 440, + 146 + ], + "type": "text", + "content": "[23] Zheng Gao, Christian Bird, and Earl T. Barr. 2017. To type or not to type: quantifying detectable bugs in JavaScript. In ICSE. https://doi.org/10.1109/ICSE.2017.75" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 146, + 440, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 146, + 440, + 167 + ], + "spans": [ + { + "bbox": [ + 44, + 146, + 440, + 167 + ], + "type": "text", + "content": "[24] Alessandro Giagnorio, Alberto Martin-Lopez, and Gabriele Bavota. 2025. Enhancing Code Generation for Low-Resource Languages: No Silver Bullet. arXiv Preprint (2025). https://doi.org/10.48550/arXiv.2501.19085" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 167, + 231, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 167, + 231, + 176 + ], + "spans": [ + { + "bbox": [ + 44, + 167, + 231, + 176 + ], + "type": "text", + "content": "[25] GitHub. [n.d.]. https://github.com/features/copilot" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 177, + 432, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 177, + 432, + 187 + ], + "spans": [ + { + "bbox": [ + 44, + 177, + 432, + 187 + ], + "type": "text", + "content": "[26] GitHub. 2022. The top programming languages. 
https://octoverse.github.com/2022/top-programming-languages" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 187, + 441, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 187, + 441, + 216 + ], + "spans": [ + { + "bbox": [ + 44, + 187, + 441, + 216 + ], + "type": "text", + "content": "[27] Aaron Grattaflori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The Llama 3 Herd of Models. ArXiv Preprint (2024). https://arxiv.org/abs/2407.21783" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 216, + 441, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 441, + 246 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 441, + 246 + ], + "type": "text", + "content": "[28] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Y. Wu, Y. K. Li, et al. 2024. DeepSeek-Coder: When the Large Language Model Meets Programming - The Rise of Code Intelligence. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2401.14196" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 246, + 440, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 246, + 440, + 266 + ], + "spans": [ + { + "bbox": [ + 44, + 246, + 440, + 266 + ], + "type": "text", + "content": "[29] Gusanidas. [n.d.]. Compilation Benchmark. https://github.com/Gusanidas/compilation-benchmark Accessed: March 10, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 267, + 440, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 267, + 440, + 286 + ], + "spans": [ + { + "bbox": [ + 44, + 267, + 440, + 286 + ], + "type": "text", + "content": "[30] Tihomir Gvero, Viktor Kuncak, Ivan Kuraj, and Ruzica Piskac. 2013. Complete completion using types and weights. In PLDI. 
https://doi.org/10.1145/2491956.2462192" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 44, + 286, + 415, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 286, + 415, + 296 + ], + "spans": [ + { + "bbox": [ + 44, + 286, + 415, + 296 + ], + "type": "text", + "content": "[31] John E. Hopcroft and Jeffrey D. Ullman. 1979. Introduction to Automata Theory, Languages and Computation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 296, + 441, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 296, + 441, + 326 + ], + "spans": [ + { + "bbox": [ + 44, + 296, + 441, + 326 + ], + "type": "text", + "content": "[32] Lei Huang, Wejiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. 2023. A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2311.05232" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 44, + 326, + 441, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 326, + 441, + 354 + ], + "spans": [ + { + "bbox": [ + 44, + 326, + 441, + 354 + ], + "type": "text", + "content": "[33] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. OpenAI o1 System Card. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.16720" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 44, + 355, + 440, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 355, + 440, + 385 + ], + "spans": [ + { + "bbox": [ + 44, + 355, + 440, + 385 + ], + "type": "text", + "content": "[34] Prithwish Jana, Piyush Jha, Haoyang Ju, Gautham Kishore, Aryan Mahajan, and Vijay Ganesh. 2024. 
CoTran: An LLM-Based Code Translator Using Reinforcement Learning with Feedback from Compiler and Symbolic Execution. In ECAI (Frontiers in Artificial Intelligence and Applications). https://doi.org/10.3233/FAIA240968" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 44, + 385, + 440, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 385, + 440, + 406 + ], + "spans": [ + { + "bbox": [ + 44, + 385, + 440, + 406 + ], + "type": "text", + "content": "[35] Juyong Jiang, Fan Wang, Jiasi Shen, Sungju Kim, and Sunghun Kim. 2024. A Survey on Large Language Models for Code Generation. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.00515" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 44, + 406, + 440, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 406, + 440, + 426 + ], + "spans": [ + { + "bbox": [ + 44, + 406, + 440, + 426 + ], + "type": "text", + "content": "[36] Sathvik Joel, Jie JW Wu, and Fatemeh H. Fard. 2024. Survey on Code Generation for Low resource and Domain Specific Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2410.03981" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 44, + 426, + 441, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 426, + 441, + 455 + ], + "spans": [ + { + "bbox": [ + 44, + 426, + 441, + 455 + ], + "type": "text", + "content": "[37] Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico Cassano, Joel Lamy-Poirier, Nouamane Tazi, Ao Tang, Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, et al. 2024. StarCoder 2 and The Stack v2: The Next Generation. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2402.19173" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 44, + 455, + 343, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 455, + 343, + 465 + ], + "spans": [ + { + "bbox": [ + 44, + 455, + 343, + 465 + ], + "type": "text", + "content": "[38] Madnight. 2024. GitHub 2.0. https://madnight.github.io/git/#/pull_requestes/2024/1" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 44, + 466, + 441, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 466, + 441, + 485 + ], + "spans": [ + { + "bbox": [ + 44, + 466, + 441, + 485 + ], + "type": "text", + "content": "[39] Harry G. Mairson. 2004. Linear lambda calculus and PTIME-completeness. J. Funct. Program. (2004). https://doi.org/10.1017/S0956796804005131" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 44, + 485, + 379, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 485, + 379, + 495 + ], + "spans": [ + { + "bbox": [ + 44, + 485, + 379, + 495 + ], + "type": "text", + "content": "[40] Nicholas D Matsakis and Felix S Klock. 2014. The rust language. ACM SIGAda Ada Letters (2014)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 44, + 496, + 441, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 496, + 441, + 525 + ], + "spans": [ + { + "bbox": [ + 44, + 496, + 441, + 525 + ], + "type": "text", + "content": "[41] Daniel Melcer, Nathan Fulton, Sanjay Krishna Gouda, and Haifeng Qian. 2024. Constrained Decoding for Fill-in-the-Middle Code Language Models via Efficient Left and Right Quotienting of Context-Sensitive Grammars. (2024). 
https://arxiv.org/abs/2402.17988" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 44, + 525, + 440, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 525, + 440, + 544 + ], + "spans": [ + { + "bbox": [ + 44, + 525, + 440, + 544 + ], + "type": "text", + "content": "[42] Microsoft. 2024. TypeScript. https://github.com/microsoft/TypeScript. Accessed on November 9, 2024, commit #ef802b1." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 44, + 545, + 440, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 545, + 440, + 565 + ], + "spans": [ + { + "bbox": [ + 44, + 545, + 440, + 565 + ], + "type": "text", + "content": "[43] John C. MITCHELL. 1990. Type Systems for Programming Languages. In Formal Models and Semantics. https://www.sciencedirect.com/science/article/pii/B9780444880741500135" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 44, + 565, + 441, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 565, + 441, + 595 + ], + "spans": [ + { + "bbox": [ + 44, + 565, + 441, + 595 + ], + "type": "text", + "content": "[44] Niklas Muennighoff, Qian Liu, Armel Randy Zebaze, Qinkai Zheng, Binyuan Hui, Terry Yue Zhuo, Swayam Singh, Xiangru Tang, Leandro von Werra, and Shayne Longpre. 2024. OctoPack: Instruction Tuning Code Large Language Models. In ICLR. https://openreview.net/forum?id=mw1PWNSWZP" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 44, + 596, + 440, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 440, + 615 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 440, + 615 + ], + "type": "text", + "content": "[45] Niels Mündler, Jingxuan He, Hao Wang, Koushik Sen, Dawn Song, and Martin Vechev. 2025. Reproduction Package for \"Type-Constrained Code Generation with Language Models\". 
doi:10.5281/zenodo.15355889" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 44, + 615, + 441, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 615, + 441, + 644 + ], + "spans": [ + { + "bbox": [ + 44, + 615, + 441, + 644 + ], + "type": "text", + "content": "[46] Niels Mündler, Mark Niklas Müller, Jingxuan He, and Martin Vechev. 2024. SWT-Bench: Testing and Validating Real-World Bug-Fixes with Code Agents. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/bit/94f093b41fc2666376fb1f667fe282f3-AbsAbstract-Conference.html" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:24" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 86, + 441, + 645 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 44, + 86, + 441, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 86, + 441, + 105 + ], + "spans": [ + { + "bbox": [ + 44, + 86, + 441, + 105 + ], + "type": "text", + "content": "[47] nielstron. 2024. Incorrect type deducted for accumulator in reduce. https://github.com/microsoft/TypeScript/issues/59999." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 107, + 411, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 107, + 411, + 116 + ], + "spans": [ + { + "bbox": [ + 45, + 107, + 411, + 116 + ], + "type": "text", + "content": "[48] nop33. 2024. Wrong inferred initial value in reduce. https://github.com/microsoft/TypeScript/issues/59863." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 117, + 403, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 117, + 403, + 126 + ], + "spans": [ + { + "bbox": [ + 45, + 117, + 403, + 126 + ], + "type": "text", + "content": "[49] OpenAI. 2023. GPT-4 Technical Report. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2303.08774" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 128, + 441, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 128, + 441, + 145 + ], + "spans": [ + { + "bbox": [ + 45, + 128, + 441, + 145 + ], + "type": "text", + "content": "[50] OpenAI. 2025. Structured Outputs. https://platform.openai.com/docs/guides/structured-outputs Accessed: March 10, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 146, + 441, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 146, + 441, + 176 + ], + "spans": [ + { + "bbox": [ + 45, + 146, + 441, + 176 + ], + "type": "text", + "content": "[51] Gabriel Orlanski, Kefan Xiao, Xavier Garcia, Jeffrey Hui, Joshua Howland, Jonathan Malmaud, Jacob Austin, Rishabh Singh, and Michele Catasta. 2023. Measuring the Impact of Programming Language Distribution. In ICML. https://proceedings.mlr.press/v202/orlanski23a.html" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 177, + 374, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 177, + 374, + 186 + ], + "spans": [ + { + "bbox": [ + 45, + 177, + 374, + 186 + ], + "type": "text", + "content": "[52] oxc project. 2024. 
oxc - The Javascript Oxidation Compiler. https://github.com/oxc-project/oxc." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 187, + 440, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 187, + 440, + 216 + ], + "spans": [ + { + "bbox": [ + 45, + 187, + 440, + 216 + ], + "type": "text", + "content": "[53] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 217, + 440, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 217, + 440, + 246 + ], + "spans": [ + { + "bbox": [ + 45, + 217, + 440, + 246 + ], + "type": "text", + "content": "[54] Rangeet Pan, Ali Reza Ibrahimzada, Rahul Krishna, Divya Sankar, Lambert Pouguem Wassi, Michele Merler, Boris Sobolev, Raju Pavuluri, Saurabh Sinha, and Reyhaneh Jabbarvand. 2024. Lost in Translation: A Study of Bugs Introduced by Large Language Models while Translating Code. In ICSE. https://doi.org/10.1145/3597503.3639226" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 247, + 441, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 247, + 441, + 275 + ], + "spans": [ + { + "bbox": [ + 45, + 247, + 441, + 275 + ], + "type": "text", + "content": "[55] Hammond Pearce, Baleegh Ahmad, Benjamin Tan, Brendan Dolan-Gavitt, and Ramesh Karri. 2022. Asleep at the Keyboard? Assessing the Security of GitHub Copilot's Code Contributions. In S&P. 
https://doi.org/10.1109/SP46214.2022.9833571" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 277, + 440, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 277, + 440, + 295 + ], + "spans": [ + { + "bbox": [ + 45, + 277, + 440, + 295 + ], + "type": "text", + "content": "[56] Daniel Perelman, Sumit Gulwani, Thomas Ball, and Dan Grossman. 2012. Type-directed completion of partial expressions. In PLDI. https://doi.org/10.1145/2254064.2254098" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 296, + 441, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 296, + 441, + 324 + ], + "spans": [ + { + "bbox": [ + 45, + 296, + 441, + 324 + ], + "type": "text", + "content": "[57] Gabriel Poesia, Alex Polozov, Vu Le, Ashish Tiwari, Gustavo Soares, Christopher Meek, and Sumit Gulwani. 2022. Synchronesh: Reliable Code Generation from Pre-trained Language Models. In ICLR. https://openreview.net/forum?id=KmtVD97J43e" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 326, + 441, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 326, + 441, + 344 + ], + "spans": [ + { + "bbox": [ + 45, + 326, + 441, + 344 + ], + "type": "text", + "content": "[58] Vipula Rawte, Amit P. Sheth, and Amitava Das. 2023. A Survey of Hallucination in Large Foundation Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2309.05922" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 346, + 441, + 375 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 346, + 441, + 375 + ], + "spans": [ + { + "bbox": [ + 45, + 346, + 441, + 375 + ], + "type": "text", + "content": "[59] Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, et al. 2023. Code Llama: Open Foundation Models for Code. arXiv Preprint (2023). 
https://doi.org/10.48550/arXiv.2308.12950" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 45, + 376, + 440, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 376, + 440, + 396 + ], + "spans": [ + { + "bbox": [ + 45, + 376, + 440, + 396 + ], + "type": "text", + "content": "[60] Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural Machine Translation of Rare Words with Subword Units. In ACL. https://doi.org/10.18653/v1/p16-1162" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 45, + 396, + 440, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 396, + 440, + 415 + ], + "spans": [ + { + "bbox": [ + 45, + 396, + 440, + 415 + ], + "type": "text", + "content": "[61] Manish Shetty, Naman Jain, Adwait Godbole, Sanjit A. Seshia, and Koushik Sen. 2024. Syzygy: Dual Code-Test C to (safe) Rust Translation using LLMs and Dynamic Analysis. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2412.14234" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 416, + 440, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 416, + 440, + 434 + ], + "spans": [ + { + "bbox": [ + 45, + 416, + 440, + 434 + ], + "type": "text", + "content": "[62] Vince Szabo, Dominik Winterer, and Zhendong Su. 2024. Compilation Quotient (CQ): A Metric for the Compilation Hardness of Programming Languages. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2406.04778" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 45, + 436, + 441, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 436, + 441, + 465 + ], + "spans": [ + { + "bbox": [ + 45, + 436, + 441, + 465 + ], + "type": "text", + "content": "[63] Florian Tambon, Arghavan Moradi Dakhel, Amin Nikanjam, Foutse Khomh, Michel C. Desmarais, and Giuliano Antoniol. 2025. Bugs in large language models generated code: an empirical study. Empir. Softw. Eng. (2025). 
https://doi.org/10.1007/s10664-025-10614-4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 45, + 465, + 440, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 465, + 440, + 495 + ], + "spans": [ + { + "bbox": [ + 45, + 465, + 440, + 495 + ], + "type": "text", + "content": "[64] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. 2024. Gemma 2: Improving Open Language Models at a Practical Size. arXiv Preprint (2024). https://arxiv.org/abs/2408.00118" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 45, + 496, + 440, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 496, + 440, + 514 + ], + "spans": [ + { + "bbox": [ + 45, + 496, + 440, + 514 + ], + "type": "text", + "content": "[65] Yun-Da Tsai, Mingjie Liu, and Haoxing Ren. 2024. Code Less, Align More: Efficient LLM Fine-tuning for Code Generation with Data Pruning. (2024). https://doi.org/10.48550/arXiv.2407.05040" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 45, + 516, + 440, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 516, + 440, + 535 + ], + "spans": [ + { + "bbox": [ + 45, + 516, + 440, + 535 + ], + "type": "text", + "content": "[66] Shubham Ugare, Tarun Suresh, Hangoo Kang, Sasa Misailovic, and Gagandeep Singh. 2024. SynCode: LLM Generation with Grammar Augmentation. ArXiv Preprint (2024). https://arxiv.org/abs/2403.01632" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 45, + 536, + 440, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 536, + 440, + 555 + ], + "spans": [ + { + "bbox": [ + 45, + 536, + 440, + 555 + ], + "type": "text", + "content": "[67] Pawel Urzyczyn. 1997. Inhabitation in Typed Lambda-Calculi (A Syntactic Approach). In TLCA (Lecture Notes in Computer Science). 
https://doi.org/10.1007/3-540-62688-3_47" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 45, + 555, + 440, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 555, + 440, + 574 + ], + "spans": [ + { + "bbox": [ + 45, + 555, + 440, + 574 + ], + "type": "text", + "content": "[68] Heidi Vella. 2024. Google turns to AI to write new code; Workforce reduced. https://aibusiness.com/data/google-turns-to-ai-to-write-new-code-workforce-reduced" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 45, + 576, + 440, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 576, + 440, + 595 + ], + "spans": [ + { + "bbox": [ + 45, + 576, + 440, + 595 + ], + "type": "text", + "content": "[69] Yuxiang Wei, Chunqiu Steven Xia, and Lingming Zhang. 2023. Copiloting the Copilots: Fusing Large Language Models with Completion Engines for Automated Program Repair. In ESEC/FSE. https://doi.org/10.1145/3611643.3616271" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 45, + 596, + 441, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 596, + 441, + 624 + ], + "spans": [ + { + "bbox": [ + 45, + 596, + 441, + 624 + ], + "type": "text", + "content": "[70] Martin Weyssow, Xin Zhou, Kisub Kim, David Lo, and Houari A. Sahraoui. 2023. Exploring Parameter-Efficient Fine-Tuning Techniques for Code Generation with Large Language Models. arXiv Preprint (2023). https://doi.org/10.48550/arXiv.2308.10462" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 45, + 625, + 441, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 625, + 441, + 645 + ], + "spans": [ + { + "bbox": [ + 45, + 625, + 441, + 645 + ], + "type": "text", + "content": "[71] Brandon T. Willard and Rémi Louf. 2023. Efficient Guided Generation for Large Language Models. arXiv Preprint (2023). 
https://doi.org/10.48550/arXiv.2307.09702" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 60, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 60, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 60, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "text", + "content": "171:25" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 86, + 443, + 167 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 43, + 86, + 443, + 115 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 86, + 443, + 115 + ], + "spans": [ + { + "bbox": [ + 43, + 86, + 443, + 115 + ], + "type": "text", + "content": "[72] Andy Yang, David Chiang, and Dana Angluin. 2024. Masked Hard-Attention Transformers Recognize Exactly the Star-Free Languages. In NeurIPS. http://papers.nips.cc/paper_files/paper/2024/bit/13d7f172259b11b230cc5da8768abc5f-AAbstract-Conference.html" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 116, + 442, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 116, + 442, + 136 + ], + "spans": [ + { + "bbox": [ + 43, + 116, + 442, + 136 + ], + "type": "text", + "content": "[73] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024. Qwen2.5 Technical Report. arXiv Preprint (2024). 
https://doi.org/10.48550/arXiv.2412.15115" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 43, + 136, + 442, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 136, + 442, + 167 + ], + "spans": [ + { + "bbox": [ + 43, + 136, + 442, + 167 + ], + "type": "text", + "content": "[74] Quanjun Zhang, Chunrong Fang, Yang Xie, Yuxiang Ma, Weisong Sun, Yun Yang, and Zhenyu Chen. 2024. A Systematic Literature Review on Large Language Models for Automated Program Repair. arXiv Preprint (2024). https://doi.org/10.48550/arXiv.2405.01466" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 43, + 179, + 239, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 179, + 239, + 190 + ], + "spans": [ + { + "bbox": [ + 43, + 179, + 239, + 190 + ], + "type": "text", + "content": "A Detailed Prefix Automaton Definitions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 193, + 439, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 193, + 439, + 207 + ], + "spans": [ + { + "bbox": [ + 42, + 193, + 439, + 207 + ], + "type": "text", + "content": "In this section, we provide more detailed definitions and analysis of the various automata for " + }, + { + "bbox": [ + 42, + 193, + 439, + 207 + ], + "type": "inline_equation", + "content": "L_{B}" + }, + { + "bbox": [ + 42, + 193, + 439, + 207 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 43, + 216, + 139, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 216, + 139, + 226 + ], + "spans": [ + { + "bbox": [ + 43, + 216, + 139, + 226 + ], + "type": "text", + "content": "A.1 Base Automata" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 230, + 442, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 230, + 442, + 255 + ], + "spans": [ + { + "bbox": [ + 42, + 230, + 442, + 255 + ], + "type": "text", + "content": "We now provide detailed definitions for the base prefix automata introduced at the end of §3.2: union, concatenation, Kleene-Star, and terminal." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "spans": [ + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "text", + "content": "Union. For the union " + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "inline_equation", + "content": "A_X \\cup A_Y" + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "text", + "content": ", we define the resulting sets of initial states and accepting states as " + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "inline_equation", + "content": "I \\coloneqq I_X \\cup I_Y" + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "inline_equation", + "content": "F \\coloneqq F_X \\cup F_Y" + }, + { + "bbox": [ + 42, + 260, + 441, + 285 + ], + "type": "text", + "content": ", respectively. 
The transition function is defined as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 175, + 289, + 307, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 289, + 307, + 321 + ], + "spans": [ + { + "bbox": [ + 175, + 289, + 307, + 321 + ], + "type": "interline_equation", + "content": "\\delta (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. \\end{array} \\right.", + "image_path": "9a9fe5e21ca0fc4ade3648a646c2e9bf76dafd38627781c95101a0ee50d528d5.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 324, + 441, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 324, + 441, + 350 + ], + "spans": [ + { + "bbox": [ + 42, + 324, + 441, + 350 + ], + "type": "text", + "content": "To show that the language parsed by this automaton is indeed the union " + }, + { + "bbox": [ + 42, + 324, + 441, + 350 + ], + "type": "inline_equation", + "content": "L(A_{X} \\cup A_{Y}) = L(A_{X}) \\cup L(A_{Y})" + }, + { + "bbox": [ + 42, + 324, + 441, + 350 + ], + "type": "text", + "content": ", we employ a short helper lemma, which can be shown inductively." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "spans": [ + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "text", + "content": "LEMMA 7. 
The set of the reachable states from a set of states " + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "text", + "content": " is equal to the union of reachable states from each state in " + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "inline_equation", + "content": "\\gamma (\\mathbf{q},s) = \\bigcup_{q\\in \\mathbf{q}}\\gamma (q,s)" + }, + { + "bbox": [ + 43, + 355, + 442, + 383 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "spans": [ + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "text", + "content": "Since the states are distinct and we merely combine the transition functions of both automata, using the lemma, we can quickly see that the language parsed is indeed the union. Moreover, if both " + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "text", + "content": " are prefix automata, this also holds for " + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "inline_equation", + "content": "A_{X} \\cup A_{Y}" + }, + { + "bbox": [ + 42, + 386, + 442, + 422 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "spans": [ + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "text", + "content": "Concatenation. For the concatenation automaton " + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "inline_equation", + "content": "I \\coloneqq I_X" + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "inline_equation", + "content": "F \\coloneqq F_Y" + }, + { + "bbox": [ + 42, + 429, + 441, + 452 + ], + "type": "text", + "content": ", and the transition function as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 139, + 455, + 344, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 455, + 344, + 500 + ], + "spans": [ + { + "bbox": [ + 139, + 455, + 344, + 500 + ], + "type": "interline_equation", + "content": "\\delta_ {X Y} (q, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q, c) & \\text {i f} q \\in Q _ {X} \\backslash F _ {X} \\\\ \\delta_ {X} (q, c) \\cup \\delta_ {Y} (I _ {Y}, c) & \\text {i f} q \\in F _ {X} \\\\ \\delta_ {Y} (q, c) & \\text {i f} q \\in Q _ {Y}. 
\\end{array} \\right.", + "image_path": "60fa4e5789c10a64865475b022f3cb99f35909315ba98a7414da61c6223607ad.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "spans": [ + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": "Informally, concatenation preserves the parsing behavior of both " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " in their respective states. When " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " reaches an accepting state of " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " and receives another input character, it either remains in " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " or transitions to " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": ", as defined in the second case of " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "\\delta_{XY}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": ". 
Essentially, this maintains outgoing edges from accepting states in " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": " while adding edges from these accepting states to initial states of " + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 503, + 441, + 563 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "spans": [ + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": "It follows from a similar argument that " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "L(A_{XY}) = L(A_X) \\circ L(A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "L(A_X) \\circ L(A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " is defined as " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "\\{s_X \\circ s_Y \\mid s_X \\in L(A_X), s_Y \\in L(A_Y)\\}" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ". We first show " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "L(A_{XY}) \\subseteq L(A_X) \\circ L(A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ". 
Due to (P1), we can always split any " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s \\in L(A_{XY})" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " that extends from " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "I_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "F_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " that extends from " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "I_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "F_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ". Then " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_X \\in L(A_X)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_Y \\in L(A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ". 
For " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "L(A_X) \\circ L(A_Y) \\subseteq L(A_X \\circ A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ", we pick any " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_X \\circ s_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "L(A_X) \\circ L(A_Y)" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " and parse it using " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ". We observe that it will first traverse from " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "I_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "F_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " consuming " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "s_X" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": ", and then transition through " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "I_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": "F_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": " by consuming " + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "inline_equation", + "content": 
"s_Y" + }, + { + "bbox": [ + 42, + 563, + 441, + 635 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "spans": [ + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": "Moreover " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " is a prefix automaton, if " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " are prefix automata and " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "L(A_{Y}) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " is a prefix automaton, we can reach " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "F_{X}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " from any state in " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "Q_{X}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": ". 
From " + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "inline_equation", + "content": "F_{X}" + }, + { + "bbox": [ + 42, + 635, + 441, + 660 + ], + "type": "text", + "content": " we additionally reach" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:26" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 71 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "I_{Y} \\subseteq Q_{Y}" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "A_{Y}" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": " is a prefix automaton, we can reach " + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "F_{Y}" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": " for any state in " + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "Q_{Y}" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": ". 
This construction is a prefix automaton only if " + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "I_{Y} \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": ", which, due to the prefix property, is equivalent to " + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "inline_equation", + "content": "L(A_{Y}) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 84, + 440, + 110 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "spans": [ + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": "Kleene-Star. We define the Kleene-Star automaton " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "A_{\\overline{X}}" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": " that parses indefinite repetitions of words accepted by " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": ". First, we consider all initial states as final states, i.e., we ensure " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "I_X \\subseteq F_{\\overline{X}}" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": ". 
Then we add transitions to the transition function " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "\\delta_X" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": " from the final states " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "F_X" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": " back to the initial states " + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "inline_equation", + "content": "I_X" + }, + { + "bbox": [ + 42, + 115, + 440, + 151 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 142, + 156, + 340, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 156, + 340, + 188 + ], + "spans": [ + { + "bbox": [ + 142, + 156, + 340, + 188 + ], + "type": "interline_equation", + "content": "\\delta_ {\\overline {{X}}} (q _ {X}, c) := \\left\\{ \\begin{array}{l l} \\delta_ {X} (q _ {X}, c) & \\text {i f q \\not \\in F _ {X}} \\\\ \\delta_ {X} (q _ {X}, c) \\cup \\delta (I _ {X}, c) & \\text {i f q _ {X} \\in F _ {X}}. \\end{array} \\right.", + "image_path": "7bd7f054612c4b5ce3ec157716303576583113e6a0fcb2d75dc6408106b57761.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "spans": [ + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": "We can quickly see that " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "L(A_{\\overline{X}}) = \\{\\overline{s} \\mid s \\in L(A_X)\\}" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": ", with the same argument as the concatenation automaton. 
Additionally, because the initial states are accepting, the empty word (zero repetitions) is in " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "L(A_{\\overline{X}})" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": ". We similarly see that this is a prefix automaton if " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "A_{X}" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": " is a prefix automaton. Note that here " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "L(A_{X}) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": " is not required. This is because if " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "L(A_{X}) \\neq \\emptyset" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "inline_equation", + "content": "A_{\\overline{X}} = A_{X} = A_{\\emptyset}" + }, + { + "bbox": [ + 42, + 192, + 440, + 252 + ], + "type": "text", + "content": ", which is still a prefix automaton." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "spans": [ + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": "Terminals. The terminal automaton " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "A_{\\mathsf{S}}" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": " parses exactly the terminal S. 
They accept the usual alphabet " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": " and feature the states " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "Q \\coloneqq \\{q_{\\mathsf{s}} \\mid \\mathsf{s} \\text{ is a suffix of S}\\}" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "F \\coloneqq \\{q_{\\varepsilon}\\}" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "I \\coloneqq \\{q_{\\mathsf{S}}\\}" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": ". The transition function " + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 42, + 259, + 440, + 294 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 174, + 298, + 308, + 329 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 298, + 308, + 329 + ], + "spans": [ + { + "bbox": [ + 174, + 298, + 308, + 329 + ], + "type": "interline_equation", + "content": "\\delta (q _ {s}, c) := \\left\\{ \\begin{array}{l l} \\{q _ {s ^ {\\prime}} \\} & \\text {i f c \\circ s ^ {\\prime} = s} \\\\ \\varnothing & \\text {o t h e r w i s e .} \\end{array} \\right.", + "image_path": "b8dd3da9566767a457ff25647916a8656a65423e1b578756cff759a72841a378.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "spans": [ + { + "bbox": [ + 42, + 334, + 440, + 394 + ], 
+ "type": "text", + "content": "Clearly " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "A_{\\mathfrak{S}}" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": " is a prefix automaton. We can show inductively that for any " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "s: \\gamma(q_{s}, s') = \\{q_{\\varepsilon}\\} \\Longleftrightarrow s = s'" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": ", and thus " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "L(A_{\\mathfrak{S}}) = \\{\\mathfrak{S}\\}" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": ". With a simple modification, we introduce " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "A_{\\mathfrak{s}}^{W}" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": " denotes whitespace characters. 
The transition function is defined as " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "\\delta(q_{\\mathfrak{s}}^{W}, c) := \\{q_{\\mathfrak{s}}^{W}\\}" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "c \\in W" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": "; otherwise, " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "\\delta(q_{c \\circ s}^{W}, t) := \\{q_{\\mathfrak{s}}^{W}\\}" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": ". This allows arbitrary whitespace before parsing " + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 42, + 334, + 440, + 394 + ], + "type": "text", + "content": ". This is how we implement syntactic indifference to whitespace between terminals." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 43, + 403, + 124, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 403, + 124, + 416 + ], + "spans": [ + { + "bbox": [ + 43, + 403, + 124, + 416 + ], + "type": "text", + "content": "A.2 Expressions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 419, + 440, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 419, + 440, + 442 + ], + "spans": [ + { + "bbox": [ + 42, + 419, + 440, + 442 + ], + "type": "text", + "content": "Expressions are parsed using recursive automatons as introduced in §3.4. In this part of the Appendix, we describe in more detail how information is passed between states." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "spans": [ + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "content": "Notation. In the following, we will implicitly assume that " + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "inline_equation", + "content": "\\delta(q, c) = \\emptyset" + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "content": " if not explicitly defined otherwise, making notation more concise. For any state, we access the following information through dot notation or the special notation on the state, which we assume is passed to subsequent states through the transition function (unless otherwise stated). This information is alternatively passed through to entire automata in composite automata, e.g., in " + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "inline_equation", + "content": "A_{XY}" + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "inline_equation", + "content": "A_X" + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "inline_equation", + "content": "A_Y" + }, + { + "bbox": [ + 42, + 449, + 440, + 509 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 512, + 440, + 607 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "spans": [ + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "inline_equation", + "content": "q \\in F_X" + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "text", + "content": ": Whether state " + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "text", + "content": " is an accepting state of the automaton " + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "inline_equation", + "content": "A_X" + }, + { + "bbox": [ + 52, + 512, + 338, + 524 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "spans": [ + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "inline_equation", + "content": "q. \\Gamma" + }, + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "text", + "content": ": The type environment based on state " + }, + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 524, + 338, + 535 + ], + "type": "text", + "content": " currently being parsed." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": ".LHS: The left-hand side expression of an extending expression represented by state " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": ", i.e., when extending " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "Y" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": " and currently parsing " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "q_{Y}" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "q_{Y}" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": ".LHS = " + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 52, + 536, + 440, + 560 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "spans": [ + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": ".TYP: The described type of the last coherent expression that this state belongs to. This is only defined for accepting states. Generally, we ensure that when some expression " + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": " was parsed, the corresponding state " + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "inline_equation", + "content": "q_{e}" + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": " has attribute " + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "inline_equation", + "content": "q_{e}" + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": ".TYP such that " + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "inline_equation", + "content": "q_{e} \\Gamma \\vdash e : q_{e}" + }, + { + "bbox": [ + 52, + 560, + 440, + 596 + ], + "type": "text", + "content": ".TYP." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "spans": [ + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "inline_equation", + "content": "q \\downarrow T" + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "text", + "content": ": Type " + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "text", + "content": " to which state " + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 596, + 249, + 607 + ], + "type": "text", + "content": " is constrained." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": "When accessing the properties of " + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": ", we access the property of the current state of the automaton " + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "inline_equation", + "content": "q \\in Q" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": ", e.g., " + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "inline_equation", + "content": "A. \\mathrm{LHS} = q. \\mathrm{LHS}" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": ". For parsed automata, the current state is the final, accepting state. 
The TYP attribute expresses the type of the expression parsed so far. In expression states " + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": ", we leverage the LHS to accurately determine " + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "inline_equation", + "content": "q. \\mathrm{TYP}" + }, + { + "bbox": [ + 42, + 611, + 440, + 659 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 69 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "text", + "content": "171:27" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 54, + 84, + 436, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 84, + 436, + 146 + ], + "spans": [ + { + "bbox": [ + 54, + 84, + 436, + 146 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} q _ {\\mathrm {S T R . T Y P}} := \\text {s t r i n g} \\quad q _ {(e) . \\mathrm {T Y P}} := A _ {e}. \\mathrm {T Y P} \\\\ q _ {\\text {N U M}}. \\text {T Y P} := \\text {n u m b e r} \\quad q _ {\\odot e}. \\text {T Y P} := R, \\text {f o r} q _ {\\odot e}. \\text {L H S}. \\text {T Y P} = S, A _ {e}. \\text {T Y P} = T \\text {a n d} S \\odot T: R \\\\ q _ {\\text {B O O L . T Y P}} := \\text {b o o l e a n} \\quad q _ {(\\overline {{e}}). 
\\text {T Y P}} := T, \\text {f o r} q _ {(\\overline {{e}}). \\text {L H S . T Y P}} = (\\overline {{p}}) \\Rightarrow T \\\\ q _ {x. \\mathrm {T Y P}} := T \\text {w h e r e} q _ {x}. \\Gamma \\vdash x: T \\quad q. n. \\mathrm {T Y P} := T, \\text {f o r L O O K U P} (q. n. \\mathrm {L H S . T Y P}, n) = T \\\\ q _ {(\\overline {{p}}) = > e. T Y P} := \\left(A _ {\\overline {{p}}} ^ {-}. T Y P\\right) = > A _ {e}. T Y P \\\\ \\end{array}", + "image_path": "7e68d3859b1c3c3ce72398ab31c9f29439281fa842eacc7395389c0c754b5a7f.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 151, + 442, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 151, + 442, + 198 + ], + "spans": [ + { + "bbox": [ + 42, + 151, + 442, + 198 + ], + "type": "text", + "content": "Unrestricted Expressions. The left-hand side of the currently parsed expression is used in the definition of automata for three extending expressions; arithmetic operators, function call, and member access. The arithmetic operator automaton constrains its states to those with valid operators, i.e.:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 162, + 200, + 320, + 226 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 200, + 320, + 226 + ], + "spans": [ + { + "bbox": [ + 162, + 200, + 320, + 226 + ], + "type": "interline_equation", + "content": "A_{\\odot e}:= \\bigcup_{\\exists R:A_{\\odot e}.LHS.TYP\\odot T = R}A_{\\odot}(\\circ A_{e}\\downarrow T).", + "image_path": "3358439ee189d4f3ef4469308d320b9785a0a8f548a1598b1b5c41afddad2b06.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 228, + 441, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 228, + 441, + 252 + ], + "spans": [ + { + "bbox": [ + 42, + 228, + 441, + 252 + ], + "type": "text", + "content": "For function call, the automaton is only valid if the left-hand side is a function, and accepts only the valid signature." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 113, + 257, + 370, + 288 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 257, + 370, + 288 + ], + "spans": [ + { + "bbox": [ + 113, + 257, + 370, + 288 + ], + "type": "interline_equation", + "content": "A _ {(\\overline {{e}})} := \\left\\{ \\begin{array}{l l} A _ {(} \\circ (A _ {\\overline {{e}}} \\downarrow A _ {\\overline {{p}}}. \\mathrm {T Y P}) \\circ A _ {)} & \\text {i f} A _ {(\\overline {{e}}). \\mathrm {L H S . T Y P}} = (\\overline {{p}}) \\Rightarrow T \\\\ A _ {\\emptyset} & \\text {o t h e r w i s e .} \\end{array} \\right.", + "image_path": "37ee7ea4b02c77c67357e62f6555bfa33bbc3c8da9616d5c45611df687113026.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 293, + 441, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 293, + 441, + 317 + ], + "spans": [ + { + "bbox": [ + 42, + 293, + 441, + 317 + ], + "type": "text", + "content": "Finally, the member access automaton is a union of the automata that parses the attributes of the left-hand side expression. Or formally," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 168, + 321, + 315, + 349 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 321, + 315, + 349 + ], + "spans": [ + { + "bbox": [ + 168, + 321, + 315, + 349 + ], + "type": "interline_equation", + "content": "A_{\\cdot n}:= \\bigcup_{\\exists T:\\text{LOOKUP}(A_{\\cdot n}.LHS.TYP},m) = T}A_{\\cdot \\mathfrak{m}}.", + "image_path": "a0f7132e7a45261ca334925eb701aadbd125056868be897f704152471a35f2a3.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 354, + 441, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 354, + 441, + 378 + ], + "spans": [ + { + "bbox": [ + 42, + 354, + 441, + 378 + ], + "type": "text", + "content": "Type-Restricted Expressions. 
The type-restricted versions of the automata are covered by the definitions presented in §3.4. We therefore do not separately list them here." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 388, + 183, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 388, + 183, + 399 + ], + "spans": [ + { + "bbox": [ + 42, + 388, + 183, + 399 + ], + "type": "text", + "content": "A.3 Pruning the Type Search" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 402, + 442, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 402, + 442, + 437 + ], + "spans": [ + { + "bbox": [ + 42, + 402, + 442, + 437 + ], + "type": "text", + "content": "We now present our heuristic for pruning the type search recursion from the prefix automaton for type-constrained expressions in §3.4, i.e., our implementation of PRUNESEARCH at Line 6 of Algorithm 2. The heuristic is based on the complexity and novelty of candidate types to explore." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "spans": [ + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "content": "Based on the assumptions about the lookup function and the extension expressions in §3.1, we observe a restriction in the reachable types by extensions: from any given type, we reach itself, result types of arithmetic operators via op, return types through CALL, and member types through MEMBER. 
A higher-order type " + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "inline_equation", + "content": "(\\cdot) \\Rightarrow T" + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "content": " does not allow access to types not reachable from " + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "content": ". Consequently, we avoid exploring such higher-order types unless the target type is of higher order, or the higher-order type offers novel, yet unexplored types. For instance, in Figure 11, the type " + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "inline_equation", + "content": "(\\cdot) \\Rightarrow" + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "content": " number is not explored, because it is more complex than both the initial and goal types, number and string, and does not contain any unexplored type. Meanwhile, " + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "inline_equation", + "content": "(\\cdot) \\Rightarrow" + }, + { + "bbox": [ + 42, + 438, + 442, + 546 + ], + "type": "text", + "content": " string is explored, as it contains a novel string type." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "spans": [ + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "content": "To formalize this understanding, we introduce the concepts about the depth and root types of a given type, denoted as " + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "inline_equation", + "content": "\\text{DEPTH}" + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "inline_equation", + "content": "\\text{ROOT}" + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "content": ", respectively. " + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "inline_equation", + "content": "\\text{DEPTH}" + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "content": " measures the complexity of a type, specifically the order of a function, while " + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "inline_equation", + "content": "\\text{ROOT}" + }, + { + "bbox": [ + 42, + 546, + 442, + 594 + ], + "type": "text", + "content": " returns all types of minimal depth (e.g., string, number, and boolean) that constitute a higher-order type. They are defined as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 598, + 433, + 630 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 598, + 433, + 630 + ], + "spans": [ + { + "bbox": [ + 57, + 598, + 433, + 630 + ], + "type": "interline_equation", + "content": "\\operatorname {D E P T H} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {D E P T H} (S) + 1 & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ 0 & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\qquad \\operatorname {R O O T} (T) := \\left\\{ \\begin{array}{l l} \\operatorname {R O O T} (S) & \\text {i f} T = (\\overline {{p}}) \\Rightarrow S, \\\\ \\{T \\} & \\text {o t h e r w i s e .} \\end{array} \\right.", + "image_path": "5061290a5981e83f5d5267b7bd48cef13eddf525ec623f2d9e3f58041b5fd498.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": "We leverage DEPTH and ROOT to implement PRUNESEARCH " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "(T,G,S)" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": " for a current type " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": ", a goal type " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": ", and a type " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": " after an extension is applied on " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": ". 
In general, if " + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 635, + 441, + 659 + ], + "type": "text", + "content": " is not directly accessible" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:28" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 121, + 82, + 362, + 174 + ], + "blocks": [ + { + "bbox": [ + 121, + 82, + 362, + 174 + ], + "lines": [ + { + "bbox": [ + 121, + 82, + 362, + 174 + ], + "spans": [ + { + "bbox": [ + 121, + 82, + 362, + 174 + ], + "type": "image", + "image_path": "e4f0d79317e2cd79bc15fc30c4d4aeea45c8d0e676e0e7d6a6cc2bb0c53bb505.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "lines": [ + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "spans": [ + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "type": "text", + "content": "Figure 11. 
An example search through the graph for type reachability, starting from " + }, + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "type": "inline_equation", + "content": "T =" + }, + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "type": "text", + "content": " number with the goal string, e.g., after parsing let x : string; " + }, + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "type": "inline_equation", + "content": "x = 1" + }, + { + "bbox": [ + 42, + 177, + 443, + 226 + ], + "type": "text", + "content": ". States and edges along the final path are marked in green and explored nodes in blue. The () => number node is not explored, as complex types are avoided by our heuristic. The node () => string is explored as it enables reaching new type string." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "spans": [ + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": "from " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": ", it will also not be accessible from expressions with the same root types but greater depth, such as " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "() \\Rightarrow T" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": ". 
When " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": " is of higher order, exploring up to the depth of " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": " can be required, such as when " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "G = () \\Rightarrow ((.) => \\text{number})" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": ". Based on these two ideas, we stop exploring " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "inline_equation", + "content": "\\text{DEPTH}(S) > \\max(\\text{DEPTH}(G), \\text{DEPTH}(T))" + }, + { + "bbox": [ + 42, + 257, + 441, + 306 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "spans": [ + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "content": "Further, if a higher-depth function returns an unexplored type, we need to explore it. Sticking to the example in Figure 11, type number has the member toString of type () => string. The type string can only be reached by exploring the member access at depth 1. On the contrary, we do not explore a higher-depth function if it does not introduce novel types other than those explored. 
To achieve this, we adapt Algorithm 2 to additionally define a set of root types " + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "content": ", which is initialized to an empty set and is updated by " + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "inline_equation", + "content": "R := R \\cup \\mathrm{root}(T)" + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "content": ". We do not explore " + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "inline_equation", + "content": "\\mathrm{root}(S) \\subseteq R" + }, + { + "bbox": [ + 42, + 306, + 442, + 378 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 378, + 442, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 378, + 442, + 462 + ], + "spans": [ + { + "bbox": [ + 42, + 378, + 442, + 462 + ], + "type": "text", + "content": "Taking the conjunction of the aforementioned two aspects, our pruning heuristic is implemented as PRUNESEARCH" + }, + { + "bbox": [ + 42, + 378, + 442, + 462 + ], + "type": "inline_equation", + "content": "(T,G,S) \\coloneqq \\mathrm{DEPTH}(S) > \\max(\\mathrm{DEPTH}(T), \\mathrm{DEPTH}(S)) \\wedge \\mathrm{ROOT}(S) \\subseteq R" + }, + { + "bbox": [ + 42, + 378, + 442, + 462 + ], + "type": "text", + "content": ". The restrictions based on depth and root types are based on the results of the rigorously analyzed search over succinct types by Gvero et al. [30]. This provides a robust heuristic for exploring as many relevant inhabitable types as possible. 
However, due to the additional complexity introduced by the lookup function, we can not guarantee completeness and instead refer to the strong empirical results in our evaluation in §5 as evidence of the search's high coverage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 43, + 474, + 204, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 474, + 204, + 486 + ], + "spans": [ + { + "bbox": [ + 43, + 474, + 204, + 486 + ], + "type": "text", + "content": "A.4 Implementation of DERIVABLE" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "spans": [ + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": "Recall that in Table 1, DERIVABLE for function expressions are defined as: " + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "inline_equation", + "content": "\\mathrm{DERIVABLE}(q_{(\\overline{p})\\Rightarrow e})\\coloneqq \\{(\\overline{p})\\Rightarrow T\\mid \\mathrm{REACHABLE}(\\mathrm{DERIVABLE}(q_e),T)\\}" + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": ". This involves constructing a type reachability graph and collecting all types " + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": " reachable from DERIVABLE " + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "inline_equation", + "content": "(q_{e})" + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": ". 
However, this process is intractable because " + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": " can be of arbitrarily high-order, as such there are infinitely many " + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 489, + 441, + 561 + ], + "type": "text", + "content": " to explore. A similar issue exists for grouped expressions, as their DERIVABLE function is also defined to enumerate reachable types. We introduce two optimization heuristics to address this problem." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "spans": [ + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "content": "We first observe that DERIVABLE is always called within the context of an invocation of REACHABLE with target type " + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "content": ", e.g., REACHABLE(DERIVABLE(q(\\overline{p}) => e), G) for function expressions. To compute DERIVABLE(q(\\overline{p}) => e), we enumerate all types present on the type graph represented by REACHABLE(DERIVABLE(q_e), G), which is finite due to application of the pruning heuristics in Appendix A.3. 
In other words, we bound the maximum complexity of considered types " + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "content": " using the pruning heuristic for reachability of target type " + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "content": ". This leads to a sound but potentially incomplete version of DERIVABLE. However, since the final goal is to reach " + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 561, + 442, + 658 + ], + "type": "text", + "content": ", this heuristic provides a practically useful set of all relevant derivable types." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:29" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "spans": [ + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": "Second, we observe that the resulting two-tiered call REACHABLE( DERIVABLE " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": 
"inline_equation", + "content": "(q_{(\\overline{p})} \\Rightarrow e)" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": ") can be integrated into a single call to further reduce the amount of explored types. Concretely, when discovering some type " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": " in REACHABLE( DERIVABLE " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "(q_e)" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": "), as per the previous heuristic, we allow transitioning directly to REACHABLE " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "(\\overline{p}) \\Rightarrow M, G" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": " to allow a depth-prioritizing exploration of the search graph. This allows us to efficiently discover a path to " + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 42, + 85, + 440, + 146 + ], + "type": "text", + "content": " if it exists." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 155, + 121, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 155, + 121, + 166 + ], + "spans": [ + { + "bbox": [ + 43, + 155, + 121, + 166 + ], + "type": "text", + "content": "A.5 Statements" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 170, + 440, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 170, + 440, + 194 + ], + "spans": [ + { + "bbox": [ + 42, + 170, + 440, + 194 + ], + "type": "text", + "content": "We define the remaining automata to capture the complete language from §3.1. To correctly handle function return types, we pass on related information when entering function bodies:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 198, + 440, + 257 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "spans": [ + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "inline_equation", + "content": "q.R" + }, + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "text", + "content": ": The expected return type of the current state " + }, + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 53, + 198, + 277, + 209 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 210, + 421, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 210, + 421, + 221 + ], + "spans": [ + { + "bbox": [ + 52, + 210, + 421, + 221 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 210, + 421, + 221 + ], + "type": "inline_equation", + "content": "q." 
+ }, + { + "bbox": [ + 52, + 210, + 421, + 221 + ], + "type": "text", + "content": " RETURNED: Whether the currently parsed program block has returned in all branches." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 221, + 440, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 221, + 440, + 257 + ], + "spans": [ + { + "bbox": [ + 52, + 221, + 440, + 257 + ], + "type": "text", + "content": "- q.MUSTRETURN: Whether the currently parsed program block must return (i.e., If-Then-Else branches do not need to contain return statements even if a return type is expected of the surrounding code block)." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "spans": [ + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": "The single statement automaton is another recursive definition, since some statements, e.g., If-Then-Else, can themselves contain statements. The statement automaton is defined recursively as " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{s} \\coloneqq A_{\\mathrm{DECL}} \\cup A_{\\mathrm{EXPR}} \\cup A_{\\mathrm{RET}} \\cup A_{\\mathrm{BLOCK}} \\cup A_{\\mathrm{FUN}} \\cup A_{\\mathrm{ITE}}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": ". 
The expression statement automaton and block automaton are simply defined as " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{EXPR}} \\coloneqq A_{e}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": "; and " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{BLOCK}} \\coloneqq A_{\\{\\overline{s}\\}}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": ". The declaration automaton " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{DECL}} \\coloneqq A_{\\mathrm{let} x:T}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": "; captures variable names " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": " using an automaton for non-existing identifiers, which works the same way as " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{x}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": " except that it rejects terminals that match an existing variable. This automaton is a prefix automaton as well, since indefinite additional characters can be added to the variable name and there are only finitely many defined variables. The If-Then-Else automaton is defined using standard concatenation: " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{ITE}} \\coloneqq A_{\\mathrm{if}(e) s \\text{else}s}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": ". 
The statements automaton " + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "inline_equation", + "content": "A_{\\overline{s}}" + }, + { + "bbox": [ + 42, + 261, + 441, + 393 + ], + "type": "text", + "content": ", based on the Kleene-Star automaton definition and the single statement automaton. Return statements are only non-empty when the expected return type is set, i.e. when parsing inside a function:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 154, + 398, + 329, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 398, + 329, + 430 + ], + "spans": [ + { + "bbox": [ + 154, + 398, + 329, + 430 + ], + "type": "interline_equation", + "content": "A _ {\\mathrm {R E T}} := \\left\\{ \\begin{array}{l l} A _ {\\mathrm {r e t u r n}} \\circ A _ {e} \\downarrow T & \\text {i f} A _ {\\mathrm {R E T}}. R = T \\\\ A _ {\\emptyset} & \\text {o t h e r w i s e .} \\end{array} \\right.", + "image_path": "affe5abc4b21e199de8d5fd3ee6d500dc4622a2daba4cad2f072210df2eed967.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 436, + 442, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 436, + 442, + 459 + ], + "spans": [ + { + "bbox": [ + 42, + 436, + 442, + 459 + ], + "type": "text", + "content": "For functions, the automaton is based on the standard concatenation " + }, + { + "bbox": [ + 42, + 436, + 442, + 459 + ], + "type": "inline_equation", + "content": "A_{\\text{FUN}} \\coloneqq A_{\\text{function } x(\\overline{p}):T(\\overline{s})}" + }, + { + "bbox": [ + 42, + 436, + 442, + 459 + ], + "type": "text", + "content": ". 
However, the transition function updates the states of the statement automata inside the function:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 462, + 440, + 558 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 52, + 462, + 440, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 462, + 440, + 486 + ], + "spans": [ + { + "bbox": [ + 52, + 462, + 440, + 486 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 462, + 440, + 486 + ], + "type": "inline_equation", + "content": "q.R \\coloneqq T" + }, + { + "bbox": [ + 52, + 462, + 440, + 486 + ], + "type": "text", + "content": ", i.e., the return type of these statements is set to the return type of the function. This value is propagated recursively to all sub-automata." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "spans": [ + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "text", + "content": ".MUSTRETURN := true, for the outermost statement block automaton. It is set to false for deeper nested statement blocks and as soon as a parsed statement " + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "text", + "content": " has " + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "inline_equation", + "content": "q_{X}" + }, + { + "bbox": [ + 52, + 486, + 440, + 522 + ], + "type": "text", + "content": ".RETURNED set to true - i.e. one of the main body statements returned in every branch." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 522, + 440, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 522, + 440, + 558 + ], + "spans": [ + { + "bbox": [ + 52, + 522, + 440, + 558 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 52, + 522, + 440, + 558 + ], + "type": "inline_equation", + "content": "q. \\text{RETURNED} :=" + }, + { + "bbox": [ + 52, + 522, + 440, + 558 + ], + "type": "text", + "content": " false, per default in every statement, except a) in return automata, b) inside a multi-statement automaton where the previous statement has RETURNED = true and c) in ITE-automata where both branching statements have RETURNED = true." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "spans": [ + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "text", + "content": "As long as a state " + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "text", + "content": " in a multi-statement automaton has " + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "inline_equation", + "content": "X. \\text{RETURNED} = \\text{false}" + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "inline_equation", + "content": "q. \\text{MUSTRETURN} = \\text{true}" + }, + { + "bbox": [ + 42, + 562, + 448, + 610 + ], + "type": "text", + "content": ", it can not accept but instead forces the generation of another statement. Since we can always express the requested type through literals and can always generate a return statement to fulfill this requirement, the prefix automaton property is not violated." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 43, + 619, + 239, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 619, + 239, + 631 + ], + "spans": [ + { + "bbox": [ + 43, + 619, + 239, + 631 + ], + "type": "text", + "content": "B Details about Experimental Evaluation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 42, + 635, + 440, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 635, + 440, + 659 + ], + "spans": [ + { + "bbox": [ + 42, + 635, + 440, + 659 + ], + "type": "text", + "content": "In this section, we detail how executable code is extracted from the model responses and a slight modification to the decoding algorithm used, that increases throughput heuristically." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:30" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 42, + 84, + 443, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 84, + 443, + 146 + ], + "spans": [ + { + "bbox": [ + 42, + 84, + 443, + 146 + ], + "type": "text", + "content": "Implementation Details. We have two main external dependencies. To implement the regular-expression-based literal automata, we leverage the regex library, as it allows checking if the current string can be completed to match a regular expression. 
To implement LLM inference, we leverage the transformers library. We provide an exhaustive list of supported and unsupported features of the TypeScript language in our final implementation in Tables 5 and 6, respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 151, + 442, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 151, + 442, + 212 + ], + "spans": [ + { + "bbox": [ + 42, + 151, + 442, + 212 + ], + "type": "text", + "content": "Hyperparameters. We run the models on A100 NVidia GPUs with 80 GB of VRAM and CUDA version 12.4. We set the sampling temperature to 1. We set seeds to 0 to 4 on the four HumanEval runs and 0 on the one MBPP run, respectively. We limit the completions to 1000 tokens and time out after 300 seconds. We compute syntactic correctness using the Oxidation toolchain [52] as the official TypeScript compiler does not clearly distinguish between syntactic and semantic errors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 216, + 442, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 216, + 442, + 289 + ], + "spans": [ + { + "bbox": [ + 42, + 216, + 442, + 289 + ], + "type": "text", + "content": "Excluded MBPP Instances. We discovered that a number of TypeScript translations in the MultiPL-E dataset [13] contained invalidly generated nested tuples. After reporting them to the developers, they have been resolved in the latest version of MBPP and we include them in our evaluation. Still, we find that the TypeScript translation of a number of MBPP instances contains too broad type annotation, annotating elements as any or array of any. 
We therefore exclude the following 6 instances from the evaluation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 292, + 371, + 326 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 73, + 292, + 313, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 292, + 313, + 303 + ], + "spans": [ + { + "bbox": [ + 73, + 292, + 313, + 303 + ], + "type": "text", + "content": "- mbpp_405_check_tuplex\n- mbpp_612_merge" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 73, + 304, + 371, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 304, + 371, + 315 + ], + "spans": [ + { + "bbox": [ + 73, + 304, + 371, + 315 + ], + "type": "text", + "content": "- mbpp_563extract_values -mbpp_725.extract_quotation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 316, + 352, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 316, + 352, + 326 + ], + "spans": [ + { + "bbox": [ + 73, + 316, + 352, + 326 + ], + "type": "text", + "content": "- mbpp_580.extract_even\n- mbpp_791_removeNSTed" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 42, + 331, + 443, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 331, + 443, + 439 + ], + "spans": [ + { + "bbox": [ + 42, + 331, + 443, + 439 + ], + "type": "text", + "content": "Complete Prompts. We provide the complete LLM prompts for our evaluated tasks (synthesis, translation, and repair) in Figures 12-14. The prompts are templates, instantiated with instructions specific to each task and problem instance. If system prompts are not available for a given LLM, we pretend the system prompt to the first user prompt. The model completion starts from a pre-filled function signature, enabling unified unit testing. For the repair prompt, we add the non-compilable model output as assistant output and use a second turn to pass back compiler outputs. 
Compiler errors contain line numbers for localization, so we annotate the output with line numbers. We find that Qwen2.5 32B tends to always generate test cases, which leads to errors during compilation. We therefore append the sentence Do not include test cases in the code. to its prompt." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 445, + 443, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 445, + 443, + 542 + ], + "spans": [ + { + "bbox": [ + 42, + 445, + 443, + 542 + ], + "type": "text", + "content": "Extracting Output Code. Given our prompts, LLMs are expected to output the resulting programs. However, they often produce additional outputs, such as generated test cases and explanations. Now we describe our heuristics for extracting the generated code. We first extract the corresponding TypeScript code block (i.e., ``` typescript`, or do not cut off if the block is not closed. Inside the code block, we cut off after the closing curly brace of the last balanced pair of curly braces, if it is followed by a newline or semicolon. This determines the last statement block generated, and avoids cutting off, e.g., inside a template literal. Again, if no such case is found, we do not prune the output. We demonstrate the operation of our cutoff heuristics in Figure 15." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 43, + 549, + 173, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 549, + 173, + 561 + ], + "spans": [ + { + "bbox": [ + 43, + 549, + 173, + 561 + ], + "type": "text", + "content": "C Case Study Full Outputs" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 42, + 565, + 442, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 565, + 442, + 601 + ], + "spans": [ + { + "bbox": [ + 42, + 565, + 442, + 601 + ], + "type": "text", + "content": "In §5.4, we present the shortened versions of three qualitative examples showcasing the effectiveness of our approach. In Figures 16-18, we provide the full code outputs of these examples, with detailed descriptions in the respective captions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "spans": [ + { + "bbox": [ + 43, + 59, + 241, + 70 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 69 + ], + "type": "text", + "content": "171:31" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 43, + 638, + 148, + 649 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 638, + 148, + 649 + ], + "spans": [ + { + "bbox": [ + 43, + 638, + 148, + 649 + ], + "type": "text", + "content": "3https://pypi.org/project/regex/" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 43, + 649, + 184, + 658 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 649, + 184, + 658 + ], + "spans": [ + { + "bbox": [ + 43, + 649, + 184, + 658 + ], + "type": 
"text", + "content": "4 https://huggingface.co/docs/transformers" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 44, + 97, + 439, + 461 + ], + "blocks": [ + { + "bbox": [ + 167, + 84, + 316, + 95 + ], + "lines": [ + { + "bbox": [ + 167, + 84, + 316, + 95 + ], + "spans": [ + { + "bbox": [ + 167, + 84, + 316, + 95 + ], + "type": "text", + "content": "Table 5. Supported TypeScript features." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 44, + 97, + 439, + 461 + ], + "lines": [ + { + "bbox": [ + 44, + 97, + 439, + 461 + ], + "spans": [ + { + "bbox": [ + 44, + 97, + 439, + 461 + ], + "type": "table", + "html": "
Supported TypeScript FeaturesExamples
Expressions, Statements, Function Declarations(LB as introduced in §3)
Additional Literals: BigInt, Regex, Template Strings10n, /\\d*, 'hello ${user}'
Additional Types: void, null, undefinedvoid, undefined, null
Index Signature Types and Literalslet x: {{y: number}: string} = 1: "hi";
Anonymous Functionsfunction(): bool {return true}
Lambda Functions with and without Function Bodiesx => {return y}, x => y
Ternary and LogicOperators? :, |, &&
Arithmetic and Boolean Operations+, -, **, &, !
Assigning Pre-and Postfix Operators++, --
Arrays[1, 2, 3]
Access and Assignment to Computed Membersx[10] = y[i];
Constructors and "new" Callslet x = new Number(1);
Calls with Optional and Rest Parametersfunction foo(x?: number, y...: string)
Sets and MapsMap<string, number>}()
Parameterized Constructor Callsnew Set<string>}()
Tupleslet x: [int, string] = [1, "hello"];
Optional Chainingx.get("hi").get("world")
Spread Operator[...xs]
Type Assertions"hello" as any
For Loopsfor(int x = 0; i < 10; i++)
For Of Loopsfor(x of xs)
For Of Loops with Tuple Destructuringfor([x, y] of xys)
Do-While and While Loopswhile (true) {...}
Typed and Untyped Variable Declarationslet x: number = 1; let y = 100;
Comments, Multiline Comments// Comment
Returning without Expressionsreturn;
Try-Catch Statements with a Fixed Exception Typetry {...} catch (e) {...}
Throw Statementsthrow new Error("..."
Importing the crypto Libraryrequire("crypto")
Global Scope ObjectsMath, parseInt
Automatic Semicolon Insertion
", + "image_path": "0c4baffe0fc9edc7385e27cc94d0c45ae5838eae71ab014be51ed0f83954a62d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 45, + 496, + 439, + 651 + ], + "blocks": [ + { + "bbox": [ + 162, + 482, + 322, + 493 + ], + "lines": [ + { + "bbox": [ + 162, + 482, + 322, + 493 + ], + "spans": [ + { + "bbox": [ + 162, + 482, + 322, + 493 + ], + "type": "text", + "content": "Table 6.Unsupported TypeScript features." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 45, + 496, + 439, + 651 + ], + "lines": [ + { + "bbox": [ + 45, + 496, + 439, + 651 + ], + "spans": [ + { + "bbox": [ + 45, + 496, + 439, + 651 + ], + "type": "table", + "html": "
Missing FeaturesExamples
General Library Importsrequire("example")
Use of Functions Before Declaration
For In Loopsfor(x in y)
Type Declaration
User-Defined Classes
Declaration and Parameterized Call of General Parameterized Functions
Destructuring Assignment[x, y] = z
Uninitialized, Unannotated Variable Declarationslet x;
Return Type Inference
Literal Types
Enumerables
Symbols
", + "image_path": "1f1a04cc743632903100ca0c76f3e776b8f11d465997df71f1a7f2b79743c678.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 60, + 67, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 60, + 67, + 69 + ], + "spans": [ + { + "bbox": [ + 45, + 60, + 67, + 69 + ], + "type": "text", + "content": "171:32" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "spans": [ + { + "bbox": [ + 165, + 59, + 441, + 70 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 52, + 86, + 433, + 261 + ], + "blocks": [ + { + "bbox": [ + 52, + 86, + 433, + 261 + ], + "lines": [ + { + "bbox": [ + 52, + 86, + 433, + 261 + ], + "spans": [ + { + "bbox": [ + 52, + 86, + 433, + 261 + ], + "type": "text", + "content": "System: \nYou are an expert in TypeScript programming. Solve the given problem by writing solution code in TypeScript. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. \nUser: \nCheck if in given array of numbers, are any two numbers closer to each other than given threshold. 
\n>>> has_close_elements([1.0, 2.0, 3.0], 0.5) \nfalse \n>>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) \ntrue function \nAssistant: \n``~typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 43, + 267, + 441, + 305 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 267, + 441, + 305 + ], + "spans": [ + { + "bbox": [ + 43, + 267, + 441, + 305 + ], + "type": "text", + "content": "Figure 12. The full prompt for the synthesis task. Text in green is based on the problem instance, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "type": "code", + "bbox": [ + 52, + 315, + 420, + 601 + ], + "blocks": [ + { + "bbox": [ + 52, + 315, + 420, + 601 + ], + "lines": [ + { + "bbox": [ + 52, + 315, + 420, + 601 + ], + "spans": [ + { + "bbox": [ + 52, + 315, + 420, + 601 + ], + "type": "text", + "content": "System: \nYou are a helpful and expert programmer in Python and TypeScript. You will be given an input program in Python and your task is to translate this program into TypeScript. You may assume that the input program is correct and that the translation should be semantically equivalent. When answering, insert the solution code in a \\*\\*typescript... block. Do not include test cases in the code.. \nUser: \nThe following is the source program in Python: \n``python \nfrom typing import List \ndef has_close_elements(numbers: List[float], threshold: float) -> bool: for idx, elem in enumerate(numbers): for idx2, elem2 in enumerate(numbers): if idx != idx2: distance = abs(elem - elem2) if distance < threshold: return True return False \nPlease translate the source program to TypeScript. 
\nAssistant: \n``'typescript \nfunction has_close_elements(numbers: number[], threshold: number): boolean {" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 43, + 608, + 441, + 647 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 608, + 441, + 647 + ], + "spans": [ + { + "bbox": [ + 43, + 608, + 441, + 647 + ], + "type": "text", + "content": "Figure 13. The full prompt for the translation task. Text in green is based on the problem instance, blue is the original Python canonical solution, in this case HumanEval #0. The red sentence is only added for Qwen2.5 32B due to its tendency to generate non-compiling test cases." + } + ] + } + ], + "index": 5, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "spans": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "text", + "content": "171:33" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 107, + 80, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 107, + 80, + 116 + ], + "spans": [ + { + "bbox": [ + 52, + 107, + 80, + 116 + ], + "type": "text", + "content": "System:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 118, + 420, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 118, + 420, + 136 + ], + "spans": [ + { + "bbox": [ + 59, + 118, + 420, + 136 + ], + "type": "text", + 
"content": "You are an expert in JavaScript programming. Solve the given problem by writing solution code in JavaScript. When answering, insert the solution code in a `````````````````````````````````````````````````````````````````````````````````````````````````````" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 148, + 72, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 148, + 72, + 155 + ], + "spans": [ + { + "bbox": [ + 52, + 148, + 72, + 155 + ], + "type": "text", + "content": "User:" + } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 62, + 157, + 324, + 252 + ], + "blocks": [ + { + "bbox": [ + 62, + 157, + 324, + 252 + ], + "lines": [ + { + "bbox": [ + 62, + 157, + 324, + 252 + ], + "spans": [ + { + "bbox": [ + 62, + 157, + 324, + 252 + ], + "type": "text", + "content": "Given an array of positive integers x. return a sorted array of all elements that hasn't any even digit. \nNote: Returned array should be sorted in increasing order. 
\nFor example: \n>>unique_digits([15,33,1422,1]) \n[1, 15, 33] \n>>unique_digits([152, 323, 1422, 10]) \n[]" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 52, + 263, + 92, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 263, + 92, + 271 + ], + "spans": [ + { + "bbox": [ + 52, + 263, + 92, + 271 + ], + "type": "text", + "content": "Assistant:" + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 61, + 282, + 329, + 455 + ], + "blocks": [ + { + "bbox": [ + 61, + 282, + 329, + 455 + ], + "lines": [ + { + "bbox": [ + 61, + 282, + 329, + 455 + ], + "spans": [ + { + "bbox": [ + 61, + 282, + 329, + 455 + ], + "type": "text", + "content": "001: function unique_digits(x: number[]) : number[] { \n002: const filteredArray: number[] = []; // initialize empty array \n003: for (let i = 0; i < x.length; i++) { \n004: let strx = \"\" + x[i]; \n005: let filteredStr = strx \n006: .ToString() \n007: .split(\"\"); \n008: .filter((digit) => parseInt(digit) % 2 != 0); \n009: if (filteredStr.length > 0) { \n010: filteredArray.push(...filteredStr); \n011: } \n012: } \n013: return filteredArray.sort((a, b) => a - b); \n014: }" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 52, + 470, + 73, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 470, + 73, + 478 + ], + "spans": [ + { + "bbox": [ + 52, + 470, + 73, + 478 + ], + "type": "text", + "content": "User:" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 61, + 479, + 404, + 512 + ], + "blocks": [ + { + "bbox": [ + 61, + 479, + 404, + 512 + ], + "lines": [ + { + "bbox": [ + 61, + 479, + 404, + 512 + ], + "spans": [ + { + "bbox": [ + 61, + 479, + 404, + 512 + ], + "type": "text", + "content": "This output produced an 
error: \n../../../.././tmp/tmpaewlckfv.ts(10,26): error TS2345: Argument of type 'string' is not assignable to parameter of type 'number'." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 59, + 523, + 314, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 523, + 314, + 532 + ], + "spans": [ + { + "bbox": [ + 59, + 523, + 314, + 532 + ], + "type": "text", + "content": "Write the program again, and make sure to fix the error this time." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 543, + 92, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 543, + 92, + 552 + ], + "spans": [ + { + "bbox": [ + 52, + 543, + 92, + 552 + ], + "type": "text", + "content": "Assistant:" + } + ] + } + ], + "index": 11 + }, + { + "type": "code", + "bbox": [ + 61, + 553, + 246, + 573 + ], + "blocks": [ + { + "bbox": [ + 61, + 553, + 246, + 573 + ], + "lines": [ + { + "bbox": [ + 61, + 553, + 246, + 573 + ], + "spans": [ + { + "bbox": [ + 61, + 553, + 246, + 573 + ], + "type": "text", + "content": "```\n```script\nfunction unique_digits(x: number[]) : number[] {" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 42, + 583, + 440, + 635 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 583, + 440, + 635 + ], + "spans": [ + { + "bbox": [ + 42, + 583, + 440, + 635 + ], + "type": "text", + "content": "Figure 14. The full prompt for the repair task, where we simulate a multi-turn interaction. Text in green is based on the problem instance. blue is non-compilable model output from the synthesis step, in this case HumanEval #104 and Gemma 2 2B. red is compiler feedback. Generations from the synthesis step are annotated with line numbers to aid localizing the compiler error." 
+ } + ] + } + ], + "index": 13, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:34" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 440, + 70 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 440, + 70 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 440, + 70 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 46, + 209, + 373, + 441 + ], + "blocks": [ + { + "bbox": [ + 46, + 209, + 373, + 441 + ], + "lines": [ + { + "bbox": [ + 46, + 209, + 373, + 441 + ], + "spans": [ + { + "bbox": [ + 46, + 209, + 373, + 441 + ], + "type": "text", + "content": "```\n```\nfunction next_smallest PALINDrome(num: number): number {\n const numStr = num.toString();\n const reversedNumStr = numStr.splitAGING).reverse().joinAGING);\n let min = Math.min(Number(numStr), Number(reversedNumStr));\n}\nif (min === Number(numStr)) {\n return Number(numStr);\n}\nelse {\n return parseInt(min.toString() + 1);\n}\n}\n}\nconst findNextSmallestPalindrome = next_smallest PALINDrome(12321);\nconsole.logfindNextSmallestPalindrome); // Output: 12322\n}\n**Explanation**\nThe function takes a number as input (^num>) in the given criteria." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 43, + 445, + 443, + 484 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 445, + 443, + 484 + ], + "spans": [ + { + "bbox": [ + 43, + 445, + 443, + 484 + ], + "type": "text", + "content": "Figure 15. 
The code generated by Gemma 2 2B for MBPP #0. The cutoff algorithm first extracts the fenced code, last line marked in blue. To avoid the usage of the console object from the browser extension of TypeScript in the generated examples, we further cut off after the last code block, marked in red." + } + ] + } + ], + "index": 3, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "spans": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 441, + 69 + ], + "type": "text", + "content": "171:35" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 46, + 110, + 269, + 287 + ], + "blocks": [ + { + "bbox": [ + 46, + 110, + 269, + 287 + ], + "lines": [ + { + "bbox": [ + 46, + 110, + 269, + 287 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 269, + 287 + ], + "type": "text", + "content": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const char = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includes(char)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 44, + 290, + 441, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 44, + 290, + 441, + 324 + ], + "spans": [ + { + "bbox": [ + 44, + 290, + 441, + 324 + ], + "type": "text", + "content": "(a) The code generated by the unconstrained Gemma 2 2B when translating a Python solution. The model tries to call the split member of a string object without parameters. This is legal in Python but not allowed in TypeScript." + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 46, + 341, + 273, + 547 + ], + "blocks": [ + { + "bbox": [ + 46, + 341, + 273, + 547 + ], + "lines": [ + { + "bbox": [ + 46, + 341, + 273, + 547 + ], + "spans": [ + { + "bbox": [ + 46, + 341, + 273, + 547 + ], + "type": "text", + "content": "1 function select_words(s: string, n: number): string[] { \n2 const result: string[] = []; \n3 for (const word of s.split()) { \n4 let nConsonants = 0; \n5 for (let i = 0; i < word.length; i++) { \n6 const letter = word[i].toLowerCase(); \n7 if (!['a', 'e', 'i', 'o', 'u'].includesletter)) { \n8 nConsonants++; \n9 } \n10 } \n11 if (nConsonants === n) { \n12 result.push(word); \n13 } \n14 } \n15 return result; \n16 }" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 44, + 552, + 441, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 552, + 441, + 573 + ], + "spans": [ + { + "bbox": [ + 44, + 552, + 441, + 573 + ], + "type": "text", + "content": "(b) The code generated by Gemma 2 2B with type constraints. The constraints guide the model and help it generate the required argument to split, which determines the characters around which to split the string." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 589, + 441, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 589, + 441, + 623 + ], + "spans": [ + { + "bbox": [ + 44, + 589, + 441, + 623 + ], + "type": "text", + "content": "Figure 16. 
Comparison between the function generated by unconstrained Gemma 2 2B [64] and the code generated with the guidance of type constraints on the problem select_words in HumanEval [14]. This problem requires a function that selects words from a given list that contain a specified number of consonants." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:36" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 47, + 207, + 232, + 285 + ], + "blocks": [ + { + "bbox": [ + 47, + 207, + 232, + 285 + ], + "lines": [ + { + "bbox": [ + 47, + 207, + 232, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 207, + 232, + 285 + ], + "type": "text", + "content": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\n}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 42, + 288, + 441, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 288, + 441, + 332 + ], + "spans": [ + { + "bbox": [ + 42, + 288, + 441, + 332 + ], + "type": "text", + "content": "(a) The code generated by the unconstrained Deepseek Coder 33B Instruct. 
The function loops over all numbers smaller than " + }, + { + "bbox": [ + 42, + 288, + 441, + 332 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 42, + 288, + 441, + 332 + ], + "type": "text", + "content": " to determine the largest divisor. Even though functionally correct for positive numbers, this function returns undefined when the return statement in the loop is never called (e.g., for negative inputs), violating the type system rules." + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 47, + 349, + 249, + 439 + ], + "blocks": [ + { + "bbox": [ + 47, + 349, + 249, + 439 + ], + "lines": [ + { + "bbox": [ + 47, + 349, + 249, + 439 + ], + "spans": [ + { + "bbox": [ + 47, + 349, + 249, + 439 + ], + "type": "text", + "content": "function largest_divisor(n: number): number {\nfor(let i = n - 1; i > 0; i--) {\nif(n % i == 0) {\nreturn i;\n}\n}\nreturn 1; // Return 1 if no such divisor found\n}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 42, + 442, + 440, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 442, + 440, + 475 + ], + "spans": [ + { + "bbox": [ + 42, + 442, + 440, + 475 + ], + "type": "text", + "content": "(b) The code generated by DeepseekCoder 33B Instruct with type constraints. The system ensures that every path returns a value matching the annotated type. This forces the LLM to generate another line after the loop, and the model correctly chooses to generate a fallback return statement." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 492, + 441, + 524 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 492, + 441, + 524 + ], + "spans": [ + { + "bbox": [ + 42, + 492, + 441, + 524 + ], + "type": "text", + "content": "Figure 17. 
Comparison between the function generated by the unconstrained DeepSeekCoder 33B Instruct [28] and the code generated with the guidance of type constraints on the problem largest_divisor in HumanEval [14]. The problem asks to compute the largest divisor of a given number." + } + ] + } + ], + "index": 6, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "spans": [ + { + "bbox": [ + 43, + 60, + 240, + 69 + ], + "type": "text", + "content": "Type-Constrained Code Generation with Language Models" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "spans": [ + { + "bbox": [ + 419, + 60, + 440, + 68 + ], + "type": "text", + "content": "171:37" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 46, + 159, + 327, + 257 + ], + "blocks": [ + { + "bbox": [ + 46, + 159, + 327, + 257 + ], + "lines": [ + { + "bbox": [ + 46, + 159, + 327, + 257 + ], + "spans": [ + { + "bbox": [ + 46, + 159, + 327, + 257 + ], + "type": "text", + "content": "function intersperse(numbers: number[], delimiter: number): number[] { \nreturn numbers.reduce((acc, curr, index) => { \nacc.push(curr); \nif (index < numbers.length - 1) { \nacc.push(delimeter); \n} \nreturn acc; \n}, []); \n}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 43, + 260, + 441, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 260, + 441, + 370 + ], + "spans": [ + { + "bbox": [ + 43, + 260, + 441, + 370 + ], + "type": "text", + "content": "(a) The code generated by the unconstrained Gemma 2 9B. 
The code uses the reduce member of arrays in TypeScript. The reduce member is a generic function that accepts a callback as first argument. This callback has three parameters: an accumulator, a current value and an index. The types of the second and third parameter are fixed to the element type of the callee array and number. The first argument type of the callback function is automatically inferred from the type of the second argument to the reduce function, in this case an empty array. The type of this array is never [], a special type denoting arrays without members. Since we can assign this type to the callee type number[], the accumulator parameter obtains type never[]. However, calling push on this type results in a type violation as the pushed value of type number is not compatible with the inferred element type never. This issue is well known in TypeScript and even human developers frequently stumble upon this issue [47, 48]." + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 46, + 386, + 341, + 485 + ], + "blocks": [ + { + "bbox": [ + 46, + 386, + 341, + 485 + ], + "lines": [ + { + "bbox": [ + 46, + 386, + 341, + 485 + ], + "spans": [ + { + "bbox": [ + 46, + 386, + 341, + 485 + ], + "type": "text", + "content": "function intersperse(numbers: number[], delimiter: number): number[] {\nreturn numbers.reduce((acc: number], num: number, index: number) => {\nif (index > 0) {\nacc.push(delimeter);\n}\nacc.push(num);\nreturn acc;\n}, []);\n}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "typescript" + }, + { + "bbox": [ + 43, + 490, + 441, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 490, + 441, + 522 + ], + "spans": [ + { + "bbox": [ + 43, + 490, + 441, + 522 + ], + "type": "text", + "content": "(b) The code generated by Gemma 2 9B [64] with type constraints. 
We force the model to generate type annotations for the callback function in reduce and thus enable the model to generate the correct type annotation and avoid the issue in the unconstrained code." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 539, + 441, + 571 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 539, + 441, + 571 + ], + "spans": [ + { + "bbox": [ + 43, + 539, + 441, + 571 + ], + "type": "text", + "content": "Figure 18. Comparison between the function generated by the unconstrained Gemma 2 9B [73] and the code generated with the guidance of type constraints on the problem intersperse in HumanEval [14]. The task in this problem is to insert a delimiter number between consecutive elements of an input list." + } + ] + } + ], + "index": 6, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "spans": [ + { + "bbox": [ + 45, + 61, + 66, + 68 + ], + "type": "text", + "content": "171:38" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "spans": [ + { + "bbox": [ + 165, + 60, + 440, + 69 + ], + "type": "text", + "content": "Niels Mündler*, Jingxuan He*, Hao Wang, Koushik Sen, Dawn Song, Martin Vechev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 486, + 720 + ], + "page_idx": 37 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file